From 9fc8bcf86759f6ef64d334b6153c2c137492caed Mon Sep 17 00:00:00 2001 From: Vincent Demeester Date: Wed, 18 Dec 2019 12:51:52 +0100 Subject: [PATCH] =?UTF-8?q?Use=20vendored=20version=20of=20plumbing=20inst?= =?UTF-8?q?ead=20of=20go=20get=20=F0=9F=A6=B8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using `go get -d` updates continuously the plumbing dependency, without really updating the vendor folder, which will mean inconsistency. Signed-off-by: Vincent Demeester --- hack/update-codegen.sh | 3 +- hack/update-deps.sh | 3 +- hack/verify-codegen.sh | 3 +- test/dummy_test.go | 27 + .../tektoncd/plumbing/scripts/README.md | 271 +++++++++ .../tektoncd/plumbing/scripts/dummy.go | 26 + .../tektoncd/plumbing/scripts/e2e-tests.sh | 428 ++++++++++++++ .../tektoncd/plumbing/scripts/library.sh | 545 ++++++++++++++++++ .../scripts/markdown-link-check-config.rc | 5 + .../plumbing/scripts/markdown-lint-config.rc | 5 + .../plumbing/scripts/presubmit-tests.sh | 362 ++++++++++++ vendor/modules.txt | 1 + 12 files changed, 1673 insertions(+), 6 deletions(-) create mode 100644 test/dummy_test.go create mode 100644 vendor/github.com/tektoncd/plumbing/scripts/README.md create mode 100644 vendor/github.com/tektoncd/plumbing/scripts/dummy.go create mode 100644 vendor/github.com/tektoncd/plumbing/scripts/e2e-tests.sh create mode 100644 vendor/github.com/tektoncd/plumbing/scripts/library.sh create mode 100644 vendor/github.com/tektoncd/plumbing/scripts/markdown-link-check-config.rc create mode 100644 vendor/github.com/tektoncd/plumbing/scripts/markdown-lint-config.rc create mode 100644 vendor/github.com/tektoncd/plumbing/scripts/presubmit-tests.sh diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 64996e48644..917072a8565 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -18,8 +18,7 @@ set -o errexit set -o nounset set -o pipefail -go get -d github.com/tektoncd/plumbing -source $(go list -m -f '{{.Dir}}' github.com/tektoncd/plumbing)/scripts/library.sh +source $(dirname $0)/../vendor/github.com/tektoncd/plumbing/scripts/library.sh SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. CODEGEN_PKG=${CODEGEN_PKG:-$(go list -m -f '{{.Dir}}' k8s.io/code-generator)} diff --git a/hack/update-deps.sh b/hack/update-deps.sh index b6b9493d87c..3fbe4ec98d8 100755 --- a/hack/update-deps.sh +++ b/hack/update-deps.sh @@ -18,8 +18,7 @@ set -o errexit set -o nounset set -o pipefail -go get -d github.com/tektoncd/plumbing -source $(go list -m -f '{{.Dir}}' github.com/tektoncd/plumbing)/scripts/library.sh +source $(dirname $0)/../vendor/github.com/tektoncd/plumbing/scripts/library.sh cd ${REPO_ROOT_DIR} diff --git a/hack/verify-codegen.sh b/hack/verify-codegen.sh index 1560c93a90c..969c2794db8 100755 --- a/hack/verify-codegen.sh +++ b/hack/verify-codegen.sh @@ -18,8 +18,7 @@ set -o errexit set -o nounset set -o pipefail -go get -d github.com/tektoncd/plumbing -source $(go list -m -f '{{.Dir}}' github.com/tektoncd/plumbing)/scripts/library.sh +source $(dirname $0)/../vendor/github.com/tektoncd/plumbing/scripts/library.sh readonly TMP_DIFFROOT="$(mktemp -d ${REPO_ROOT_DIR}/tmpdiffroot.XXXXXX)" diff --git a/test/dummy_test.go b/test/dummy_test.go new file mode 100644 index 00000000000..dbd040eb1a7 --- /dev/null +++ b/test/dummy_test.go @@ -0,0 +1,27 @@ +// +build e2e + +// Copyright © 2018 The Tekton Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "testing" + + _ "github.com/tektoncd/plumbing/scripts" +) + +func TestDummy(t *testing.T) { + t.Log("This is required to make sure we get tektoncd/plumbing in the repository, folder vendor") +} diff --git a/vendor/github.com/tektoncd/plumbing/scripts/README.md b/vendor/github.com/tektoncd/plumbing/scripts/README.md new file mode 100644 index 00000000000..da8f9939ca4 --- /dev/null +++ b/vendor/github.com/tektoncd/plumbing/scripts/README.md @@ -0,0 +1,271 @@ +# Helper scripts + +This directory contains helper scripts used by Prow test jobs, as well as +local development scripts. + +## Using the `presubmit-tests.sh` helper script + +This is a helper script to run the presubmit tests. To use it: + +1. Source this script. + +1. [optional] Define the function `build_tests()`. If you don't define this + function, the default action for running the build tests is to: + + - check markdown files + - run `go build` on the entire repo + - run `/hack/verify-codegen.sh` (if it exists) + - check licenses in all go packages + + The markdown link checker tool doesn't check `localhost` links by default. + Its configuration file, `markdown-link-check-config.json`, lives in the + `test-infra/scripts` directory. To override it, create a file with the same + name, containing the custom config in the `/test` directory. + + The markdown lint tool ignores long lines by default. Its configuration file, + `markdown-lint-config.rc`, lives in the `test-infra/scripts` directory. To + override it, create a file with the same name, containing the custom config + in the `/test` directory. + +1. [optional] Customize the default build test runner, if you're using it. Set + the following environment variables if the default values don't fit your needs: + + - `DISABLE_MD_LINTING`: Disable linting markdown files, defaults to 0 (false). + - `DISABLE_MD_LINK_CHECK`: Disable checking links in markdown files, defaults + to 0 (false). + - `PRESUBMIT_TEST_FAIL_FAST`: Fail the presubmit test immediately if a test fails, + defaults to 0 (false). + +1. [optional] Define the functions `pre_build_tests()` and/or + `post_build_tests()`. These functions will be called before or after the + build tests (either your custom one or the default action) and will cause + the test to fail if they don't return success. + +1. [optional] Define the function `unit_tests()`. If you don't define this + function, the default action for running the unit tests is to run all go tests + in the repo. + +1. [optional] Define the functions `pre_unit_tests()` and/or + `post_unit_tests()`. These functions will be called before or after the + unit tests (either your custom one or the default action) and will cause + the test to fail if they don't return success. + +1. [optional] Define the function `integration_tests()`. If you don't define + this function, the default action for running the integration tests is to run + all `./test/e2e-*tests.sh` scripts, in sequence. + +1. [optional] Define the functions `pre_integration_tests()` and/or + `post_integration_tests()`. 
These functions will be called before or after the + integration tests (either your custom one or the default action) and will cause + the test to fail if they don't return success. + +1. Call the `main()` function passing `$@` (without quotes). + +Running the script without parameters, or with the `--all-tests` flag causes +all tests to be executed, in the right order (i.e., build, then unit, then +integration tests). + +Use the flags `--build-tests`, `--unit-tests` and `--integration-tests` to run +a specific set of tests. The flag `--emit-metrics` is used to emit metrics when +running the tests, and is automatically handled by the default action for +integration tests (see above). + +The script will automatically skip all presubmit tests for PRs where all changed +files are exempt of tests (e.g., a PR changing only the `OWNERS` file). + +Also, for PRs touching only markdown files, the unit and integration tests are +skipped. + +### Sample presubmit test script + +```bash +source vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh + +function post_build_tests() { + echo "Cleaning up after build tests" + rm -fr ./build-cache +} + +function unit_tests() { + make -C tests test +} + +function pre_integration_tests() { + echo "Cleaning up before integration tests" + rm -fr ./staging-area +} + +# We use the default integration test runner. + +main $@ +``` + +## Using the `e2e-tests.sh` helper script + +This is a helper script for Knative E2E test scripts. To use it: + +1. [optional] Customize the test cluster. Set the following environment variables + if the default values don't fit your needs: + + - `E2E_CLUSTER_REGION`: Cluster region, defaults to `us-central1`. + - `E2E_CLUSTER_BACKUP_REGIONS`: Space-separated list of regions to retry test cluster creation in case of stockout. Defaults to `us-west1 us-east1`. + - `E2E_CLUSTER_ZONE`: Cluster zone (e.g., `a`), defaults to none (i.e. use a regional + cluster). + - `E2E_CLUSTER_BACKUP_ZONES`: Space-separated list of zones to retry test cluster creation in case of stockout. If defined, `E2E_CLUSTER_BACKUP_REGIONS` will be ignored thus it defaults to none. + - `E2E_CLUSTER_MACHINE`: Cluster node machine type, defaults to `n1-standard-4}`. + - `E2E_MIN_CLUSTER_NODES`: Minimum number of nodes in the cluster when autoscaling, + defaults to 1. + - `E2E_MAX_CLUSTER_NODES`: Maximum number of nodes in the cluster when autoscaling, + defaults to 3. + +1. Source the script. + +1. [optional] Write the `knative_setup()` function, which will set up your + system under test (e.g., Knative Serving). This function won't be called if you + use the `--skip-knative-setup` flag. + +1. [optional] Write the `knative_teardown()` function, which will tear down your + system under test (e.g., Knative Serving). This function won't be called if you + use the `--skip-knative-setup` flag. + +1. [optional] Write the `test_setup()` function, which will set up the test + resources. + +1. [optional] Write the `test_teardown()` function, which will tear down the test + resources. + +1. [optional] Write the `cluster_setup()` function, which will set up any resources + before the test cluster is created. + +1. [optional] Write the `cluster_teardown()` function, which will tear down any + resources after the test cluster is destroyed. + +1. [optional] Write the `dump_extra_cluster_state()` function. It will be + called when a test fails, and can dump extra information about the current state + of the cluster (typically using `kubectl`). + +1. 
[optional] Write the `parse_flags()` function. It will be called whenever an + unrecognized flag is passed to the script, allowing you to define your own flags. + The function must return 0 if the flag is unrecognized, or the number of items + to skip in the command line if the flag was parsed successfully. For example, + return 1 for a simple flag, and 2 for a flag with a parameter. + +1. Call the `initialize()` function passing `$@` (without quotes). + +1. Write logic for the end-to-end tests. Run all go tests using `go_test_e2e()` + (or `report_go_test()` if you need a more fine-grained control) and call + `fail_test()` or `success()` if any of them failed. The environment variable + `KO_DOCKER_REPO` and `E2E_PROJECT_ID` will be set according to the test cluster. + You can also use the following boolean (0 is false, 1 is true) environment + variables for the logic: + + - `EMIT_METRICS`: true if `--emit-metrics` was passed. + + All environment variables above are marked read-only. + +**Notes:** + +1. Calling your script without arguments will create a new cluster in the GCP + project `$PROJECT_ID` and run the tests against it. + +1. Calling your script with `--run-tests` and the variable `KO_DOCKER_REPO` set + will immediately start the tests against the cluster currently configured for + `kubectl`. + +1. You can force running the tests against a specific GKE cluster version by using + the `--cluster-version` flag and passing a full version as the flag value. + +### Sample end-to-end test script + +This script will test that the latest Knative Serving nightly release works. It +defines a special flag (`--no-knative-wait`) that causes the script not to +wait for Knative Serving to be up before running the tests. It also requires that +the test cluster is created in a specific region, `us-west2`. + +```bash + +# This test requires a cluster in LA +E2E_CLUSTER_REGION=us-west2 + +source vendor/github.com/knative/test-infra/scripts/e2e-tests.sh + +function knative_setup() { + start_latest_knative_serving + if (( WAIT_FOR_KNATIVE )); then + wait_until_pods_running knative-serving || fail_test "Knative Serving not up" + fi +} + +function parse_flags() { + if [[ "$1" == "--no-knative-wait" ]]; then + WAIT_FOR_KNATIVE=0 + return 1 + fi + return 0 +} + +WAIT_FOR_KNATIVE=1 + +initialize $@ + +# TODO: use go_test_e2e to run the tests. +kubectl get pods || fail_test + +success +``` + +## Using the `release.sh` helper script + +This is a helper script for Knative release scripts. To use it: + +1. Source the script. + +1. [optional] By default, the release script will run `./test/presubmit-tests.sh` + as the release validation tests. If you need to run something else, set the + environment variable `VALIDATION_TESTS` to the executable to run. + +1. Write logic for building the release in a function named `build_release()`. + Set the environment variable `YAMLS_TO_PUBLISH` to the list of yaml files created, + space separated. Use the following boolean (0 is false, 1 is true) and string + environment variables for the logic: + + - `RELEASE_VERSION`: contains the release version if `--version` was passed. This + also overrides the value of the `TAG` variable as `v`. + - `RELEASE_BRANCH`: contains the release branch if `--branch` was passed. Otherwise + it's empty and `master` HEAD will be considered the release branch. + - `RELEASE_NOTES`: contains the filename with the release notes if `--release-notes` + was passed. The release notes is a simple markdown file. 
+ - `RELEASE_GCS_BUCKET`: contains the GCS bucket name to store the manifests if + `--release-gcs` was passed, otherwise the default value `knative-nightly/` + will be used. It is empty if `--publish` was not passed. + - `KO_DOCKER_REPO`: contains the GCR to store the images if `--release-gcr` was + passed, otherwise the default value `gcr.io/knative-nightly` will be used. It + is set to `ko.local` if `--publish` was not passed. + - `SKIP_TESTS`: true if `--skip-tests` was passed. This is handled automatically. + - `TAG_RELEASE`: true if `--tag-release` was passed. In this case, the environment + variable `TAG` will contain the release tag in the form `vYYYYMMDD-`. + - `PUBLISH_RELEASE`: true if `--publish` was passed. In this case, the environment + variable `KO_FLAGS` will be updated with the `-L` option. + - `PUBLISH_TO_GITHUB`: true if `--version`, `--branch` and `--publish-release` + were passed. + + All boolean environment variables default to false for safety. + + All environment variables above, except `KO_FLAGS`, are marked read-only once + `main()` is called (see below). + +1. Call the `main()` function passing `$@` (without quotes). + +### Sample release script + +```bash +source vendor/github.com/knative/test-infra/scripts/release.sh + +function build_release() { + # config/ contains the manifests + ko resolve ${KO_FLAGS} -f config/ > release.yaml + YAMLS_TO_PUBLISH="release.yaml" +} + +main $@ +``` diff --git a/vendor/github.com/tektoncd/plumbing/scripts/dummy.go b/vendor/github.com/tektoncd/plumbing/scripts/dummy.go new file mode 100644 index 00000000000..c87432b2640 --- /dev/null +++ b/vendor/github.com/tektoncd/plumbing/scripts/dummy.go @@ -0,0 +1,26 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scripts + +import ( + "fmt" +) + +func main() { + fmt.Println("This is a dummy go file so `go dep` or go modules can be used with tektoncd/plumbing/scripts") + fmt.Println("This file can be safely removed if one day this directory contains real, useful go code") +} diff --git a/vendor/github.com/tektoncd/plumbing/scripts/e2e-tests.sh b/vendor/github.com/tektoncd/plumbing/scripts/e2e-tests.sh new file mode 100644 index 00000000000..6fc95dab254 --- /dev/null +++ b/vendor/github.com/tektoncd/plumbing/scripts/e2e-tests.sh @@ -0,0 +1,428 @@ +#!/bin/bash + +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is a helper script for Tekton E2E test scripts. +# See README.md for instructions on how to use it. 
+ +source $(dirname ${BASH_SOURCE})/library.sh + +# Build a resource name based on $E2E_BASE_NAME, a suffix and $BUILD_NUMBER. +# Restricts the name length to 40 chars (the limit for resource names in GCP). +# Name will have the form $E2E_BASE_NAME-$BUILD_NUMBER. +# Parameters: $1 - name suffix +function build_resource_name() { + local prefix=${E2E_BASE_NAME}-$1 + local suffix=${BUILD_NUMBER} + # Restrict suffix length to 20 chars + if [[ -n "${suffix}" ]]; then + suffix=${suffix:${#suffix}<20?0:-20} + fi + local name="${prefix:0:20}${suffix}" + # Ensure name doesn't end with "-" + echo "${name%-}" +} + +# Test cluster parameters + +# Configurable parameters +# export E2E_CLUSTER_REGION and E2E_CLUSTER_ZONE as they're used in the cluster setup subprocess +export E2E_CLUSTER_REGION=${E2E_CLUSTER_REGION:-us-central1} +# By default we use regional clusters. +export E2E_CLUSTER_ZONE=${E2E_CLUSTER_ZONE:-} + +# Default backup regions in case of stockouts; by default we don't fall back to a different zone in the same region +readonly E2E_CLUSTER_BACKUP_REGIONS=${E2E_CLUSTER_BACKUP_REGIONS:-us-west1 us-east1} +readonly E2E_CLUSTER_BACKUP_ZONES=${E2E_CLUSTER_BACKUP_ZONES:-} + +readonly E2E_CLUSTER_MACHINE=${E2E_CLUSTER_MACHINE:-n1-standard-4} +readonly E2E_GKE_ENVIRONMENT=${E2E_GKE_ENVIRONMENT:-prod} +readonly E2E_GKE_COMMAND_GROUP=${E2E_GKE_COMMAND_GROUP:-beta} + +# Each tekton repository may have a different cluster size requirement here, +# so we allow calling code to set these parameters. If they are not set we +# use some sane defaults. +readonly E2E_MIN_CLUSTER_NODES=${E2E_MIN_CLUSTER_NODES:-1} +readonly E2E_MAX_CLUSTER_NODES=${E2E_MAX_CLUSTER_NODES:-3} + +readonly E2E_BASE_NAME="t${REPO_NAME}" +readonly E2E_CLUSTER_NAME=$(build_resource_name e2e-cls) +readonly E2E_NETWORK_NAME=$(build_resource_name e2e-net) +readonly TEST_RESULT_FILE=/tmp/${E2E_BASE_NAME}-e2e-result + +# Flag whether test is using a boskos GCP project +IS_BOSKOS=0 + +# Tear down the test resources. +function teardown_test_resources() { + # On boskos, save time and don't teardown as the cluster will be destroyed anyway. + (( IS_BOSKOS )) && return + header "Tearing down test environment" + function_exists test_teardown && test_teardown + (( ! SKIP_TEKTON_SETUP )) && function_exists tekton_teardown && tekton_teardown + # Delete the kubernetes source downloaded by kubetest + rm -fr kubernetes kubernetes.tar.gz +} + +# Run the given E2E tests. Assume tests are tagged e2e, unless `-tags=XXX` is passed. +# Parameters: $1..$n - any go test flags, then directories containing the tests to run. +function go_test_e2e() { + local test_options="" + local go_options="" + (( EMIT_METRICS )) && test_options="-emitmetrics" + [[ ! " $@" == *" -tags="* ]] && go_options="-tags=e2e" + report_go_test -v -count=1 ${go_options} $@ ${test_options} +} + +# Dump info about the test cluster. If dump_extra_cluster_info() is defined, calls it too. +# This is intended to be called when a test fails to provide debugging information. 
+function dump_cluster_state() { + echo "***************************************" + echo "*** E2E TEST FAILED ***" + echo "*** Start of information dump ***" + echo "***************************************" + echo ">>> All resources:" + kubectl get all --all-namespaces + echo ">>> Services:" + kubectl get services --all-namespaces + echo ">>> Events:" + kubectl get events --all-namespaces + function_exists dump_extra_cluster_state && dump_extra_cluster_state + echo "***************************************" + echo "*** E2E TEST FAILED ***" + echo "*** End of information dump ***" + echo "***************************************" +} + +# On a Prow job, save some metadata about the test for Testgrid. +function save_metadata() { + (( ! IS_PROW )) && return + local geo_key="Region" + local geo_value="${E2E_CLUSTER_REGION}" + if [[ -n "${E2E_CLUSTER_ZONE}" ]]; then + geo_key="Zone" + geo_value="${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}" + fi + local cluster_version="$(gcloud container clusters list --project=${E2E_PROJECT_ID} --format='value(currentMasterVersion)')" + cat << EOF > ${ARTIFACTS}/metadata.json +{ + "E2E:${geo_key}": "${geo_value}", + "E2E:Machine": "${E2E_CLUSTER_MACHINE}", + "E2E:Version": "${cluster_version}", + "E2E:MinNodes": "${E2E_MIN_CLUSTER_NODES}", + "E2E:MaxNodes": "${E2E_MAX_CLUSTER_NODES}" +} +EOF +} + +# Delete target pools and health checks that might have leaked. +# See https://github.com/knative/serving/issues/959 for details. +# TODO(adrcunha): Remove once the leak issue is resolved. +function delete_leaked_network_resources() { + # On boskos, don't bother with leaks as the janitor will delete everything in the project. + (( IS_BOSKOS )) && return + # Ensure we're using the GCP project used by kubetest + local gcloud_project="$(gcloud config get-value project)" + local http_health_checks="$(gcloud compute target-pools list \ + --project=${gcloud_project} --format='value(healthChecks)' --filter="instances~-${E2E_CLUSTER_NAME}-" | \ + grep httpHealthChecks | tr '\n' ' ')" + local target_pools="$(gcloud compute target-pools list \ + --project=${gcloud_project} --format='value(name)' --filter="instances~-${E2E_CLUSTER_NAME}-" | \ + tr '\n' ' ')" + if [[ -n "${target_pools}" ]]; then + echo "Found leaked target pools, deleting" + gcloud compute forwarding-rules delete -q --project=${gcloud_project} --region=${E2E_CLUSTER_REGION} ${target_pools} + gcloud compute target-pools delete -q --project=${gcloud_project} --region=${E2E_CLUSTER_REGION} ${target_pools} + fi + if [[ -n "${http_health_checks}" ]]; then + echo "Found leaked health checks, deleting" + gcloud compute http-health-checks delete -q --project=${gcloud_project} ${http_health_checks} + fi +} + +# Create a test cluster with kubetest and call the current script again. +function create_test_cluster() { + # Fail fast during setup. + set -o errexit + set -o pipefail + + if function_exists cluster_setup; then + cluster_setup || fail_test "cluster setup failed" + fi + + echo "Cluster will have a minimum of ${E2E_MIN_CLUSTER_NODES} and a maximum of ${E2E_MAX_CLUSTER_NODES} nodes." 
+ + # Smallest cluster required to run the end-to-end-tests + local CLUSTER_CREATION_ARGS=( + --gke-create-command="container clusters create --quiet --enable-autoscaling --min-nodes=${E2E_MIN_CLUSTER_NODES} --max-nodes=${E2E_MAX_CLUSTER_NODES} --scopes=cloud-platform --enable-basic-auth --no-issue-client-certificate ${EXTRA_CLUSTER_CREATION_FLAGS[@]}" + --gke-shape={\"default\":{\"Nodes\":${E2E_MIN_CLUSTER_NODES}\,\"MachineType\":\"${E2E_CLUSTER_MACHINE}\"}} + --provider=gke + --deployment=gke + --cluster="${E2E_CLUSTER_NAME}" + --gcp-network="${E2E_NETWORK_NAME}" + --gke-environment="${E2E_GKE_ENVIRONMENT}" + --gke-command-group="${E2E_GKE_COMMAND_GROUP}" + --test=false + ) + if (( ! IS_BOSKOS )); then + CLUSTER_CREATION_ARGS+=(--gcp-project=${GCP_PROJECT}) + fi + # SSH keys are not used, but kubetest checks for their existence. + # Touch them so if they don't exist, empty files are create to satisfy the check. + mkdir -p $HOME/.ssh + touch $HOME/.ssh/google_compute_engine.pub + touch $HOME/.ssh/google_compute_engine + # Assume test failed (see details in set_test_return_code()). + set_test_return_code 1 + local gcloud_project="${GCP_PROJECT}" + [[ -z "${gcloud_project}" ]] && gcloud_project="$(gcloud config get-value project)" + echo "gcloud project is ${gcloud_project}" + (( IS_BOSKOS )) && echo "Using boskos for the test cluster" + [[ -n "${GCP_PROJECT}" ]] && echo "GCP project for test cluster is ${GCP_PROJECT}" + echo "Test script is ${E2E_SCRIPT}" + # Set arguments for this script again + local test_cmd_args="--run-tests" + (( EMIT_METRICS )) && test_cmd_args+=" --emit-metrics" + (( SKIP_TEKTON_SETUP )) && test_cmd_args+=" --skip-tekton-setup" + [[ -n "${GCP_PROJECT}" ]] && test_cmd_args+=" --gcp-project ${GCP_PROJECT}" + [[ -n "${E2E_SCRIPT_CUSTOM_FLAGS[@]}" ]] && test_cmd_args+=" ${E2E_SCRIPT_CUSTOM_FLAGS[@]}" + local extra_flags=() + # If using boskos, save time and let it tear down the cluster + (( ! IS_BOSKOS )) && extra_flags+=(--down) + create_test_cluster_with_retries "${CLUSTER_CREATION_ARGS[@]}" \ + --up \ + --extract "${E2E_CLUSTER_VERSION}" \ + --gcp-node-image "${SERVING_GKE_IMAGE}" \ + --test-cmd "${E2E_SCRIPT}" \ + --test-cmd-args "${test_cmd_args}" \ + ${extra_flags[@]} \ + ${EXTRA_KUBETEST_FLAGS[@]} + echo "Test subprocess exited with code $?" + # Ignore any errors below, this is a best-effort cleanup and shouldn't affect the test result. + set +o errexit + function_exists cluster_teardown && cluster_teardown + delete_leaked_network_resources + local result=$(get_test_return_code) + echo "Artifacts were written to ${ARTIFACTS}" + echo "Test result code is ${result}" + exit ${result} +} + +# Retry backup regions/zones if cluster creations failed due to stockout. +# Parameters: $1..$n - any kubetest flags other than geo flag. 
+function create_test_cluster_with_retries() { + local cluster_creation_log=/tmp/${E2E_BASE_NAME}-cluster_creation-log + # zone_not_provided is a placeholder for e2e_cluster_zone to make for loop below work + local zone_not_provided="zone_not_provided" + + local e2e_cluster_regions=(${E2E_CLUSTER_REGION}) + local e2e_cluster_zones=(${E2E_CLUSTER_ZONE}) + + if [[ -n "${E2E_CLUSTER_BACKUP_ZONES}" ]]; then + e2e_cluster_zones+=(${E2E_CLUSTER_BACKUP_ZONES}) + elif [[ -n "${E2E_CLUSTER_BACKUP_REGIONS}" ]]; then + e2e_cluster_regions+=(${E2E_CLUSTER_BACKUP_REGIONS}) + e2e_cluster_zones=(${zone_not_provided}) + else + echo "No backup region/zone set, cluster creation will fail in case of stockout" + fi + + for e2e_cluster_region in "${e2e_cluster_regions[@]}"; do + for e2e_cluster_zone in "${e2e_cluster_zones[@]}"; do + E2E_CLUSTER_REGION=${e2e_cluster_region} + E2E_CLUSTER_ZONE=${e2e_cluster_zone} + [[ "${E2E_CLUSTER_ZONE}" == "${zone_not_provided}" ]] && E2E_CLUSTER_ZONE="" + + local geoflag="--gcp-region=${E2E_CLUSTER_REGION}" + [[ -n "${E2E_CLUSTER_ZONE}" ]] && geoflag="--gcp-zone=${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}" + + header "Creating test cluster in $E2E_CLUSTER_REGION $E2E_CLUSTER_ZONE" + # Don't fail test for kubetest, as it might incorrectly report test failure + # if teardown fails (for details, see success() below) + set +o errexit + { run_go_tool k8s.io/test-infra/kubetest \ + kubetest "$@" ${geoflag}; } 2>&1 | tee ${cluster_creation_log} + + # Exit if test succeeded + [[ "$(get_test_return_code)" == "0" ]] && return + # If test failed not because of cluster creation stockout, return + [[ -z "$(grep -Eio 'does not have enough resources to fulfill the request' ${cluster_creation_log})" ]] && return + done + done +} + +# Setup the test cluster for running the tests. +function setup_test_cluster() { + # Fail fast during setup. + set -o errexit + set -o pipefail + + header "Setting up test cluster" + + # Set the actual project the test cluster resides in + # It will be a project assigned by Boskos if test is running on Prow, + # otherwise will be ${GCP_PROJECT} set up by user. + readonly export E2E_PROJECT_ID="$(gcloud config get-value project)" + + # Save some metadata about cluster creation for using in prow and testgrid + save_metadata + + local k8s_user=$(gcloud config get-value core/account) + local k8s_cluster=$(kubectl config current-context) + + # If cluster admin role isn't set, this is a brand new cluster + # Setup the admin role and also KO_DOCKER_REPO + if [[ -z "$(kubectl get clusterrolebinding cluster-admin-binding 2> /dev/null)" ]]; then + acquire_cluster_admin_role ${k8s_user} ${E2E_CLUSTER_NAME} ${E2E_CLUSTER_REGION} ${E2E_CLUSTER_ZONE} + kubectl config set-context ${k8s_cluster} --namespace=default + export KO_DOCKER_REPO=gcr.io/${E2E_PROJECT_ID}/${E2E_BASE_NAME}-e2e-img + fi + + echo "- Project is ${E2E_PROJECT_ID}" + echo "- Cluster is ${k8s_cluster}" + echo "- User is ${k8s_user}" + echo "- Docker is ${KO_DOCKER_REPO}" + + export KO_DATA_PATH="${REPO_ROOT_DIR}/.git" + + trap teardown_test_resources EXIT + + # Handle failures ourselves, so we can dump useful info. + set +o errexit + set +o pipefail + + if (( ! SKIP_TEKTON_SETUP )) && function_exists tekton_setup; then + tekton_setup || fail_test "Tekton setup failed" + fi + if function_exists test_setup; then + test_setup || fail_test "test setup failed" + fi +} + +# Gets the exit of the test script. +# For more details, see set_test_return_code(). 
+function get_test_return_code() { + echo $(cat ${TEST_RESULT_FILE}) +} + +# Set the return code that the test script will return. +# Parameters: $1 - return code (0-255) +function set_test_return_code() { + # kubetest teardown might fail and thus incorrectly report failure of the + # script, even if the tests pass. + # We store the real test result to return it later, ignoring any teardown + # failure in kubetest. + # TODO(adrcunha): Get rid of this workaround. + echo -n "$1"> ${TEST_RESULT_FILE} +} + +# Signal (as return code and in the logs) that all E2E tests passed. +function success() { + set_test_return_code 0 + echo "**************************************" + echo "*** E2E TESTS PASSED ***" + echo "**************************************" + exit 0 +} + +# Exit test, dumping current state info. +# Parameters: $1 - error message (optional). +function fail_test() { + set_test_return_code 1 + [[ -n $1 ]] && echo "ERROR: $1" + dump_cluster_state + exit 1 +} + +RUN_TESTS=0 +EMIT_METRICS=0 +SKIP_TEKTON_SETUP=0 +GCP_PROJECT="" +E2E_SCRIPT="" +E2E_CLUSTER_VERSION="" +EXTRA_CLUSTER_CREATION_FLAGS=() +EXTRA_KUBETEST_FLAGS=() +E2E_SCRIPT_CUSTOM_FLAGS=() + +# Parse flags and initialize the test cluster. +function initialize() { + E2E_SCRIPT="$(get_canonical_path $0)" + E2E_CLUSTER_VERSION="${SERVING_GKE_VERSION}" + + cd ${REPO_ROOT_DIR} + while [[ $# -ne 0 ]]; do + local parameter=$1 + # Try parsing flag as a custom one. + if function_exists parse_flags; then + parse_flags $@ + local skip=$? + if [[ ${skip} -ne 0 ]]; then + # Skip parsed flag (and possibly argument) and continue + # Also save it to it's passed through to the test script + for ((i=1;i<=skip;i++)); do + E2E_SCRIPT_CUSTOM_FLAGS+=("$1") + shift + done + continue + fi + fi + # Try parsing flag as a standard one. + case ${parameter} in + --run-tests) RUN_TESTS=1 ;; + --emit-metrics) EMIT_METRICS=1 ;; + --skip-tekton-setup) SKIP_TEKTON_SETUP=1 ;; + *) + [[ $# -ge 2 ]] || abort "missing parameter after $1" + shift + case ${parameter} in + --gcp-project) GCP_PROJECT=$1 ;; + --cluster-version) E2E_CLUSTER_VERSION=$1 ;; + --cluster-creation-flag) EXTRA_CLUSTER_CREATION_FLAGS+=($1) ;; + --kubetest-flag) EXTRA_KUBETEST_FLAGS+=($1) ;; + *) abort "unknown option ${parameter}" ;; + esac + esac + shift + done + + # Use PROJECT_ID if set, unless --gcp-project was used. + if [[ -n "${PROJECT_ID:-}" && -z "${GCP_PROJECT}" ]]; then + echo "\$PROJECT_ID is set to '${PROJECT_ID}', using it to run the tests" + GCP_PROJECT="${PROJECT_ID}" + fi + if (( ! IS_PROW )) && [[ -z "${GCP_PROJECT}" ]]; then + abort "set \$PROJECT_ID or use --gcp-project to select the GCP project where the tests are run" + fi + + (( IS_PROW )) && [[ -z "${GCP_PROJECT}" ]] && IS_BOSKOS=1 + + # Safety checks + is_protected_gcr ${KO_DOCKER_REPO} && \ + abort "\$KO_DOCKER_REPO set to ${KO_DOCKER_REPO}, which is forbidden" + + readonly RUN_TESTS + readonly EMIT_METRICS + readonly GCP_PROJECT + readonly IS_BOSKOS + readonly EXTRA_CLUSTER_CREATION_FLAGS + readonly EXTRA_KUBETEST_FLAGS + readonly SKIP_TEKTON_SETUP + + if (( ! 
RUN_TESTS )); then + create_test_cluster + else + setup_test_cluster + fi +} diff --git a/vendor/github.com/tektoncd/plumbing/scripts/library.sh b/vendor/github.com/tektoncd/plumbing/scripts/library.sh new file mode 100644 index 00000000000..48a0100c661 --- /dev/null +++ b/vendor/github.com/tektoncd/plumbing/scripts/library.sh @@ -0,0 +1,545 @@ +#!/bin/bash + +# Copyright 2018 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is a collection of useful bash functions and constants, intended +# to be used in test scripts and the like. It doesn't do anything when +# called from command line. + +# Default GKE version to be used with Tekton Serving +readonly SERVING_GKE_VERSION=gke-latest +readonly SERVING_GKE_IMAGE=cos + +# Conveniently set GOPATH if unset +if [[ -z "${GOPATH:-}" ]]; then + export GOPATH="$(go env GOPATH)" + if [[ -z "${GOPATH}" ]]; then + echo "WARNING: GOPATH not set and go binary unable to provide it" + fi +fi + +# Useful environment variables +[[ -n "${PROW_JOB_ID:-}" ]] && IS_PROW=1 || IS_PROW=0 +readonly IS_PROW +readonly REPO_ROOT_DIR="${REPO_ROOT_DIR:-$(git rev-parse --show-toplevel 2> /dev/null)}" +readonly REPO_NAME="${REPO_NAME:-$(basename ${REPO_ROOT_DIR} 2> /dev/null)}" + +# Set ARTIFACTS to an empty temp dir if unset +if [[ -z "${ARTIFACTS:-}" ]]; then + export ARTIFACTS="$(mktemp -d)" +fi + +# On a Prow job, redirect stderr to stdout so it's synchronously added to log +(( IS_PROW )) && exec 2>&1 + +# Print error message and exit 1 +# Parameters: $1..$n - error message to be displayed +function abort() { + echo "error: $@" + exit 1 +} + +# Display a box banner. +# Parameters: $1 - character to use for the box. +# $2 - banner message. +function make_banner() { + local msg="$1$1$1$1 $2 $1$1$1$1" + local border="${msg//[-0-9A-Za-z _.,\/()]/$1}" + echo -e "${border}\n${msg}\n${border}" +} + +# Simple header for logging purposes. +function header() { + local upper="$(echo $1 | tr a-z A-Z)" + make_banner "=" "${upper}" +} + +# Simple subheader for logging purposes. +function subheader() { + make_banner "-" "$1" +} + +# Simple warning banner for logging purposes. +function warning() { + make_banner "!" "$1" +} + +# Checks whether the given function exists. +function function_exists() { + [[ "$(type -t $1)" == "function" ]] +} + +# Waits until the given object doesn't exist. +# Parameters: $1 - the kind of the object. +# $2 - object's name. +# $3 - namespace (optional). +function wait_until_object_does_not_exist() { + local KUBECTL_ARGS="get $1 $2" + local DESCRIPTION="$1 $2" + + if [[ -n $3 ]]; then + KUBECTL_ARGS="get -n $3 $1 $2" + DESCRIPTION="$1 $3/$2" + fi + echo -n "Waiting until ${DESCRIPTION} does not exist" + for i in {1..150}; do # timeout after 5 minutes + if ! kubectl ${KUBECTL_ARGS} > /dev/null 2>&1; then + echo -e "\n${DESCRIPTION} does not exist" + return 0 + fi + echo -n "." 
+ sleep 2 + done + echo -e "\n\nERROR: timeout waiting for ${DESCRIPTION} not to exist" + kubectl ${KUBECTL_ARGS} + return 1 +} + +# Waits until all pods are running in the given namespace. +# Parameters: $1 - namespace. +function wait_until_pods_running() { + echo -n "Waiting until all pods in namespace $1 are up" + for i in {1..150}; do # timeout after 5 minutes + local pods="$(kubectl get pods --no-headers -n $1 2>/dev/null)" + # All pods must be running + local not_running=$(echo "${pods}" | grep -v Running | grep -v Completed | wc -l) + if [[ -n "${pods}" && ${not_running} -eq 0 ]]; then + local all_ready=1 + while read pod ; do + local status=(`echo -n ${pod} | cut -f2 -d' ' | tr '/' ' '`) + # All containers must be ready + [[ -z ${status[0]} ]] && all_ready=0 && break + [[ -z ${status[1]} ]] && all_ready=0 && break + [[ ${status[0]} -lt 1 ]] && all_ready=0 && break + [[ ${status[1]} -lt 1 ]] && all_ready=0 && break + [[ ${status[0]} -ne ${status[1]} ]] && all_ready=0 && break + done <<< $(echo "${pods}" | grep -v Completed) + if (( all_ready )); then + echo -e "\nAll pods are up:\n${pods}" + return 0 + fi + fi + echo -n "." + sleep 2 + done + echo -e "\n\nERROR: timeout waiting for pods to come up\n${pods}" + return 1 +} + +# Waits until all batch jobs complete in the given namespace. +# Parameters: $1 - namespace. +function wait_until_batch_job_complete() { + echo -n "Waiting until all batch jobs in namespace $1 run to completion." + for i in {1..150}; do # timeout after 5 minutes + local jobs=$(kubectl get jobs -n $1 --no-headers \ + -ocustom-columns='n:{.metadata.name},c:{.spec.completions},s:{.status.succeeded}') + # All jobs must be complete + local not_complete=$(echo "${jobs}" | awk '{if ($2!=$3) print $0}' | wc -l) + if [[ ${not_complete} -eq 0 ]]; then + echo -e "\nAll jobs are complete:\n${jobs}" + return 0 + fi + echo -n "." + sleep 2 + done + echo -e "\n\nERROR: timeout waiting for jobs to complete\n${jobs}" + return 1 +} + +# Waits until the given service has an external address (IP/hostname). +# Parameters: $1 - namespace. +# $2 - service name. +function wait_until_service_has_external_ip() { + echo -n "Waiting until service $2 in namespace $1 has an external address (IP/hostname)" + for i in {1..150}; do # timeout after 15 minutes + local ip=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + if [[ -n "${ip}" ]]; then + echo -e "\nService $2.$1 has IP $ip" + return 0 + fi + local hostname=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + if [[ -n "${hostname}" ]]; then + echo -e "\nService $2.$1 has hostname $hostname" + return 0 + fi + echo -n "." + sleep 6 + done + echo -e "\n\nERROR: timeout waiting for service $2.$1 to have an external address" + kubectl get pods -n $1 + return 1 +} + +# Waits for the endpoint to be routable. +# Parameters: $1 - External ingress IP address. +# $2 - cluster hostname. +function wait_until_routable() { + echo -n "Waiting until cluster $2 at $1 has a routable endpoint" + for i in {1..150}; do # timeout after 5 minutes + local val=$(curl -H "Host: $2" "http://$1" 2>/dev/null) + if [[ -n "$val" ]]; then + echo -e "\nEndpoint is now routable" + return 0 + fi + echo -n "." + sleep 2 + done + echo -e "\n\nERROR: Timed out waiting for endpoint to be routable" + return 1 +} + +# Returns the name of the first pod of the given app. +# Parameters: $1 - app name. +# $2 - namespace (optional). 
+function get_app_pod() { + local pods=($(get_app_pods $1 $2)) + echo "${pods[0]}" +} + +# Returns the name of all pods of the given app. +# Parameters: $1 - app name. +# $2 - namespace (optional). +function get_app_pods() { + local namespace="" + [[ -n $2 ]] && namespace="-n $2" + kubectl get pods ${namespace} --selector=app=$1 --output=jsonpath="{.items[*].metadata.name}" +} + +# Capitalize the first letter of each word. +# Parameters: $1..$n - words to capitalize. +function capitalize() { + local capitalized=() + for word in $@; do + local initial="$(echo ${word:0:1}| tr 'a-z' 'A-Z')" + capitalized+=("${initial}${word:1}") + done + echo "${capitalized[@]}" +} + +# Dumps pod logs for the given app. +# Parameters: $1 - app name. +# $2 - namespace. +function dump_app_logs() { + echo ">>> ${REPO_NAME_FORMATTED} $1 logs:" + for pod in $(get_app_pods "$1" "$2") + do + echo ">>> Pod: $pod" + kubectl -n "$2" logs "$pod" -c "$1" + done +} + +# Sets the given user as cluster admin. +# Parameters: $1 - user +# $2 - cluster name +# $3 - cluster region +# $4 - cluster zone, optional +function acquire_cluster_admin_role() { + echo "Acquiring cluster-admin role for user '$1'" + local geoflag="--region=$3" + [[ -n $4 ]] && geoflag="--zone=$3-$4" + # Get the password of the admin and use it, as the service account (or the user) + # might not have the necessary permission. + local password=$(gcloud --format="value(masterAuth.password)" \ + container clusters describe $2 ${geoflag}) + if [[ -n "${password}" ]]; then + # Cluster created with basic authentication + kubectl config set-credentials cluster-admin \ + --username=admin --password=${password} + else + local cert=$(mktemp) + local key=$(mktemp) + echo "Certificate in ${cert}, key in ${key}" + gcloud --format="value(masterAuth.clientCertificate)" \ + container clusters describe $2 ${geoflag} | base64 -d > ${cert} + gcloud --format="value(masterAuth.clientKey)" \ + container clusters describe $2 ${geoflag} | base64 -d > ${key} + kubectl config set-credentials cluster-admin \ + --client-certificate=${cert} --client-key=${key} + fi + kubectl config set-context $(kubectl config current-context) \ + --user=cluster-admin + kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole=cluster-admin \ + --user=$1 + # Reset back to the default account + gcloud container clusters get-credentials \ + $2 ${geoflag} --project $(gcloud config get-value project) +} + +# Runs a go test and generate a junit summary. +# Parameters: $1... - parameters to go test +function report_go_test() { + # Run tests in verbose mode to capture details. + # go doesn't like repeating -v, so remove if passed. + local args=" $@ " + local go_test="go test -race -v ${args/ -v / }" + # Just run regular go tests if not on Prow. + echo "Running tests with '${go_test}'" + local report=$(mktemp) + ${go_test} | tee ${report} + local failed=( ${PIPESTATUS[@]} ) + [[ ${failed[0]} -eq 0 ]] && failed=${failed[1]} || failed=${failed[0]} + echo "Finished run, return code is ${failed}" + # Install go-junit-report if necessary. + run_go_tool github.com/jstemmer/go-junit-report go-junit-report --help > /dev/null 2>&1 + local xml=$(mktemp ${ARTIFACTS}/junit_XXXXXXXX.xml) + cat ${report} \ + | go-junit-report \ + | sed -e "s#\"github.com/tektoncd/${REPO_NAME}/#\"#g" \ + > ${xml} + echo "XML report written to ${xml}" + if (( ! IS_PROW )); then + # Keep the suffix, so files are related. 
+ local logfile=${xml/junit_/go_test_} + logfile=${logfile/.xml/.log} + cp ${report} ${logfile} + echo "Test log written to ${logfile}" + fi + return ${failed} +} + +# Run a go tool, installing it first if necessary. +# Parameters: $1 - tool package/dir for go get/install. +# $2 - tool to run. +# $3..$n - parameters passed to the tool. +function run_go_tool() { + local tool=$2 + if [[ -z "$(which ${tool})" ]]; then + local action=get + [[ $1 =~ ^[\./].* ]] && action=install + go ${action} $1 + fi + shift 2 + ${tool} "$@" +} + +# Run dep-collector to update licenses. +# Parameters: $1 - output file, relative to repo root dir. +# $2...$n - directories and files to inspect. +function update_licenses() { + cd ${REPO_ROOT_DIR} || return 1 + local dst=$1 + shift + if [ -f "${REPO_ROOT_DIR}/go.mod" ]; then + run_go_tool github.com/google/go-licenses go-licenses save ./... --save_path=${dst} --force + # Hack to make sure directories retain write permissions after save. This + # can happen if the directory being copied is a Go module. + # See https://github.com/google/go-licenses/issues/11 + chmod +w $(find ${dst} -type d) + else + run_go_tool ./vendor/github.com/knative/test-infra/tools/dep-collector dep-collector $@ > ./${dst} + fi +} + +# Run dep-collector to check for forbidden liceses. +# Parameters: $1...$n - directories and files to inspect. +function check_licenses() { + if [ -f "${REPO_ROOT_DIR}/vendor" ]; then + run_go_tool github.com/google/go-licenses go-licenses check ./... + else + # Fetch the google/licenseclassifier for its license db + go get -u github.com/google/licenseclassifier + # Check that we don't have any forbidden licenses in our images. + run_go_tool ./vendor/github.com/knative/test-infra/tools/dep-collector dep-collector -check $@ + fi +} + +# Run the given linter on the given files, checking it exists first. +# Parameters: $1 - tool +# $2 - tool purpose (for error message if tool not installed) +# $3 - tool parameters (quote if multiple parameters used) +# $4..$n - files to run linter on +function run_lint_tool() { + local checker=$1 + local params=$3 + if ! hash ${checker} 2>/dev/null; then + warning "${checker} not installed, not $2" + return 127 + fi + shift 3 + local failed=0 + for file in $@; do + ${checker} ${params} ${file} || failed=1 + done + return ${failed} +} + +# Check links in the given markdown files. +# Parameters: $1...$n - files to inspect +function check_links_in_markdown() { + # https://github.com/raviqqe/liche + local config="${REPO_ROOT_DIR}/test/markdown-link-check-config.rc" + [[ ! -e ${config} ]] && config="${_PLUMBING_SCRIPTS_DIR}/markdown-link-check-config.rc" + local options="$(grep '^-' ${config} | tr \"\n\" ' ')" + run_lint_tool liche "checking links in markdown files" "-d ${REPO_ROOT_DIR} ${options}" $@ +} + +# Check format of the given markdown files. +# Parameters: $1..$n - files to inspect +function lint_markdown() { + # https://github.com/markdownlint/markdownlint + local config="${REPO_ROOT_DIR}/test/markdown-lint-config.rc" + [[ ! -e ${config} ]] && config="${_PLUMBING_SCRIPTS_DIR}/markdown-lint-config.rc" + run_lint_tool mdl "linting markdown files" "-c ${config}" $@ +} + +# Return whether the given parameter is an integer. +# Parameters: $1 - integer to check +function is_int() { + [[ -n $1 && $1 =~ ^[0-9]+$ ]] +} + +# Return whether the given parameter is the tekton release/nightly GCF. +# Parameters: $1 - full GCR name, e.g. 
gcr.io/tekton-foo-bar +function is_protected_gcr() { + [[ -n $1 && "$1" =~ "^gcr.io/tekton-(releases|nightly)/?$" ]] +} + +# Remove symlinks in a path that are broken or lead outside the repo. +# Parameters: $1 - path name, e.g. vendor +function remove_broken_symlinks() { + for link in $(find $1 -type l); do + # Remove broken symlinks + if [[ ! -e ${link} ]]; then + unlink ${link} + continue + fi + # Get canonical path to target, remove if outside the repo + local target="$(ls -l ${link})" + target="${target##* -> }" + [[ ${target} == /* ]] || target="./${target}" + target="$(cd `dirname ${link}` && cd ${target%/*} && echo $PWD/${target##*/})" + if [[ ${target} != *github.com/tektoncd/* ]]; then + unlink ${link} + continue + fi + done +} + +# Return whether the given parameter is tekton-tests. +# Parameters: $1 - project name +function is_protected_project() { + [[ -n "$1" && "$1" == "tekton-tests" ]] +} + +# Returns the canonical path of a filesystem object. +# Parameters: $1 - path to return in canonical form +# $2 - base dir for relative links; optional, defaults to current +function get_canonical_path() { + # We don't use readlink because it's not available on every platform. + local path=$1 + local pwd=${2:-.} + [[ ${path} == /* ]] || path="${pwd}/${path}" + echo "$(cd ${path%/*} && echo $PWD/${path##*/})" +} + +# Initializations that depend on previous functions. +# These MUST come last. + +readonly _PLUMBING_SCRIPTS_DIR="$(dirname $(get_canonical_path ${BASH_SOURCE[0]}))" +readonly REPO_NAME_FORMATTED="Tekton $(capitalize ${REPO_NAME//-/})" + +# Helper functions to run YAML tests +# Taken from tektoncd/pipeline test/e2e-common.sh +function validate_run() { + local tests_finished=0 + for i in {1..60}; do + local finished="$(kubectl get $1.tekton.dev --output=jsonpath='{.items[*].status.conditions[*].status}')" + if [[ ! "$finished" == *"Unknown"* ]]; then + tests_finished=1 + break + fi + sleep 10 + done + + return ${tests_finished} +} + +function check_results() { + local failed=0 + results="$(kubectl get $1.tekton.dev --output=jsonpath='{range .items[*]}{.metadata.name}={.status.conditions[*].type}{.status.conditions[*].status}{" "}{end}')" + for result in ${results}; do + if [[ ! "${result,,}" == *"=succeededtrue" ]]; then + echo "ERROR: test ${result} but should be succeededtrue" + failed=1 + fi + done + + return ${failed} +} + +function create_resources() { + local resource=$1 + echo ">> Creating resources ${resource}" + + # Applying the resources, either *taskruns or * *pipelineruns + for file in $(find ${REPO_ROOT_DIR}/examples/${resource}s/ -name *.yaml | sort); do + perl -p -e 's/gcr.io\/christiewilson-catfactory/$ENV{KO_DOCKER_REPO}/g' ${file} | ko apply -f - || return 1 + done +} + +function run_tests() { + local resource=$1 + + # Wait for tests to finish. + echo ">> Waiting for tests to finish for ${resource}" + if validate_run $resource; then + echo "ERROR: tests timed out" + fi + + # Check that tests passed. + echo ">> Checking test results for ${resource}" + if check_results $resource; then + echo ">> All YAML tests passed" + return 0 + fi + return 1 +} + +function run_yaml_tests() { + echo ">> Starting tests for the resource ${1}" + create_resources ${1} + if ! run_tests ${1}; then + return 1 + fi + return 0 +} + +function output_yaml_test_results() { + # If formatting fails for any reason, use yaml as a fall back. 
+ kubectl get $1.tekton.dev -o=custom-columns-file=${REPO_ROOT_DIR}/test/columns.txt || \ + kubectl get $1.tekton.dev -oyaml +} + +function output_pods_logs() { + echo ">>> $1" + kubectl get $1.tekton.dev -o yaml + local runs=$(kubectl get $1.tekton.dev --output=jsonpath="{.items[*].metadata.name}") + set +e + for run in ${runs}; do + echo ">>>> $1 ${run}" + case "$1" in + "taskrun") + tkn taskrun logs ${run} + ;; + "pipelinerun") + tkn pipelinerun logs ${run} + ;; + esac + done + set -e + echo ">>>> Pods" + kubectl get pods -o yaml +} diff --git a/vendor/github.com/tektoncd/plumbing/scripts/markdown-link-check-config.rc b/vendor/github.com/tektoncd/plumbing/scripts/markdown-link-check-config.rc new file mode 100644 index 00000000000..9d802a0d486 --- /dev/null +++ b/vendor/github.com/tektoncd/plumbing/scripts/markdown-link-check-config.rc @@ -0,0 +1,5 @@ +# For help, see +# https://github.com/raviqqe/liche/blob/master/README.md + +# Don't check localhost links +-x "^https?://localhost($|[:/].*)" diff --git a/vendor/github.com/tektoncd/plumbing/scripts/markdown-lint-config.rc b/vendor/github.com/tektoncd/plumbing/scripts/markdown-lint-config.rc new file mode 100644 index 00000000000..461f891a26d --- /dev/null +++ b/vendor/github.com/tektoncd/plumbing/scripts/markdown-lint-config.rc @@ -0,0 +1,5 @@ +# For help, see +# https://github.com/markdownlint/markdownlint/blob/master/docs/configuration.md + +# Ignore long lines +rules "~MD013" diff --git a/vendor/github.com/tektoncd/plumbing/scripts/presubmit-tests.sh b/vendor/github.com/tektoncd/plumbing/scripts/presubmit-tests.sh new file mode 100644 index 00000000000..7869101b947 --- /dev/null +++ b/vendor/github.com/tektoncd/plumbing/scripts/presubmit-tests.sh @@ -0,0 +1,362 @@ +#!/bin/bash + +# Copyright 2018 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is a helper script for Tekton presubmit test scripts. +# See README.md for instructions on how to use it. + +source $(dirname ${BASH_SOURCE})/library.sh + +# Custom configuration of presubmit tests +readonly DISABLE_MD_LINTING=${DISABLE_MD_LINTING:-0} +readonly DISABLE_YAML_LINTING=${DISABLE_YAML_LINTING:-0} +readonly DISABLE_MD_LINK_CHECK=${DISABLE_MD_LINK_CHECK:-0} +readonly PRESUBMIT_TEST_FAIL_FAST=${PRESUBMIT_TEST_FAIL_FAST:-0} + +# Extensions or file patterns that don't require presubmit tests. +readonly NO_PRESUBMIT_FILES=(\.png \.gitignore \.gitattributes ^OWNERS ^OWNERS_ALIASES ^AUTHORS) + +# Flag if this is a presubmit run or not. +[[ IS_PROW && -n "${PULL_PULL_SHA}" ]] && IS_PRESUBMIT=1 || IS_PRESUBMIT=0 +readonly IS_PRESUBMIT + +# List of changed files on presubmit, LF separated. +CHANGED_FILES="" + +# Flags that this PR is exempt of presubmit tests. +IS_PRESUBMIT_EXEMPT_PR=0 + +# Flags that this PR contains only changes to documentation. +IS_DOCUMENTATION_PR=0 + +# Returns true if PR only contains the given file regexes. +# Parameters: $1 - file regexes, space separated. 
+function pr_only_contains() { + [[ -z "$(echo "${CHANGED_FILES}" | grep -v "\(${1// /\\|}\)$")" ]] +} + +# List changed files in the current PR. +# This is implemented as a function so it can be mocked in unit tests. +function list_changed_files() { + githubhelper -list-changed-files +} + +# Initialize flags and context for presubmit tests: +# CHANGED_FILES, IS_PRESUBMIT_EXEMPT_PR and IS_DOCUMENTATION_PR. +function initialize_environment() { + CHANGED_FILES="" + IS_PRESUBMIT_EXEMPT_PR=0 + IS_DOCUMENTATION_PR=0 + (( ! IS_PRESUBMIT )) && return + CHANGED_FILES="$(list_changed_files)" + if [[ -n "${CHANGED_FILES}" ]]; then + echo -e "Changed files in commit ${PULL_PULL_SHA}:\n${CHANGED_FILES}" + local no_presubmit_files="${NO_PRESUBMIT_FILES[*]}" + pr_only_contains "${no_presubmit_files}" && IS_PRESUBMIT_EXEMPT_PR=1 + pr_only_contains "\.md ${no_presubmit_files}" && IS_DOCUMENTATION_PR=1 + else + header "NO CHANGED FILES REPORTED, ASSUMING IT'S AN ERROR AND RUNNING TESTS ANYWAY" + fi + readonly CHANGED_FILES + readonly IS_DOCUMENTATION_PR + readonly IS_PRESUBMIT_EXEMPT_PR +} + +# Display a pass/fail banner for a test group. +# Parameters: $1 - test group name (e.g., build) +# $2 - result (0=passed, 1=failed) +function results_banner() { + local result + [[ $2 -eq 0 ]] && result="PASSED" || result="FAILED" + header "$1 tests ${result}" +} + +# Run build tests. If there's no `build_tests` function, run the default +# build test runner. +function run_build_tests() { + (( ! RUN_BUILD_TESTS )) && return 0 + header "Running build tests" + local failed=0 + # Run pre-build tests, if any + if function_exists pre_build_tests; then + pre_build_tests || failed=1 + fi + # Don't run build tests if pre-build tests failed + if (( ! failed )); then + if function_exists build_tests; then + build_tests || failed=1 + else + default_build_test_runner || failed=1 + fi + fi + # Don't run post-build tests if pre/build tests failed + if (( ! failed )) && function_exists post_build_tests; then + post_build_tests || failed=1 + fi + results_banner "Build" ${failed} + return ${failed} +} + +# Perform markdown build tests if necessary, unless disabled. +function markdown_build_tests() { + (( DISABLE_MD_LINTING && DISABLE_MD_LINK_CHECK )) && return 0 + # Get changed markdown files (ignore /vendor and deleted files) + local mdfiles="" + for file in $(echo "${CHANGED_FILES}" | grep \.md$ | grep -v ^vendor/ | grep -v ^third_party/); do + [[ -f "${file}" ]] && mdfiles="${mdfiles} ${file}" + done + [[ -z "${mdfiles}" ]] && return 0 + local failed=0 + if (( ! DISABLE_MD_LINTING )); then + subheader "Linting the markdown files" + lint_markdown ${mdfiles} || failed=1 + fi + if (( ! DISABLE_MD_LINK_CHECK )); then + subheader "Checking links in the markdown files" + check_links_in_markdown ${mdfiles} || failed=1 + fi + return ${failed} +} + +# Perform yaml build tests if necessary, unless disabled. 
+function yaml_build_tests() { + (( DISABLE_YAML_LINTING )) && return 0 + subheader "Linting the yaml files" + local yamlfiles="" + for file in $(echo "${CHANGED_FILES}" | grep '\.yaml$\|\.yml$' | grep -v ^vendor/); do + [[ -f "${file}" ]] && yamlfiles="${yamlfiles} ${file}" + done + [[ -z "${yamlfiles}" ]] && return 0 + yamllint ${yamlfiles} +} + +# Default build test runner that: +# * check go code style with gofmt +# * check markdown files +# * check yaml files +# * `go build` on the entire repo +# * run `/hack/verify-codegen.sh` (if it exists) +# * check licenses in all go packages +function default_build_test_runner() { + local failed=0 + # Check go code style with gofmt; exclude vendor/ files + subheader "Checking go code style with gofmt" + gofmt_out=$(gofmt -d $(find * -name '*.go' ! -path 'vendor/*' ! -path 'third_party/*')) + if [[ -n "$gofmt_out" ]]; then + failed=1 + fi + echo "$gofmt_out" + # Perform markdown build checks first + markdown_build_tests || failed=1 + # Check yaml using yamllint + yaml_build_tests || failed=1 + # For documentation PRs, just check the md files + (( IS_DOCUMENTATION_PR )) && return ${failed} + # Skip build test if there is no go code + local go_pkg_dirs="$(go list ./...)" + [[ -z "${go_pkg_dirs}" ]] && return ${failed} + # Ensure all the code builds + subheader "Checking that go code builds" + go build -v ./... || failed=1 + # Get all build tags in go code. Ignore tags in /vendor, /third_party, and + # tools for compatibility to allow for Go modules binary tool tracking. See + # https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module + # for more details. + local tags="$(grep -r '// +build' . \ + | grep -v '^./vendor/' | grep -v '^./third_party/' | grep -v "+build tools" | cut -f3 -d' ' | sort | uniq | tr '\n' ' ')" + if [[ -n "${tags}" ]]; then + go test -run=^$ -tags="${tags}" ./... || failed=1 + fi + if [[ -f ./hack/verify-codegen.sh ]]; then + subheader "Checking autogenerated code is up-to-date" + ./hack/verify-codegen.sh || failed=1 + fi + # Check that we don't have any forbidden licenses in our images. + subheader "Checking for forbidden licenses" + check_licenses ${go_pkg_dirs} || failed=1 + return ${failed} +} + +# Run unit tests. If there's no `unit_tests` function, run the default +# unit test runner. +function run_unit_tests() { + (( ! RUN_UNIT_TESTS )) && return 0 + header "Running unit tests" + local failed=0 + # Run pre-unit tests, if any + if function_exists pre_unit_tests; then + pre_unit_tests || failed=1 + fi + # Don't run unit tests if pre-unit tests failed + if (( ! failed )); then + if function_exists unit_tests; then + unit_tests || failed=1 + else + default_unit_test_runner || failed=1 + fi + fi + # Don't run post-unit tests if pre/unit tests failed + if (( ! failed )) && function_exists post_unit_tests; then + post_unit_tests || failed=1 + fi + results_banner "Unit" ${failed} + return ${failed} +} + +# Default unit test runner that runs all go tests in the repo. +function default_unit_test_runner() { + report_go_test ./... +} + +# Run integration tests. If there's no `integration_tests` function, run the +# default integration test runner. +function run_integration_tests() { + # Don't run integration tests if not requested OR on documentation PRs + (( ! 
RUN_INTEGRATION_TESTS )) && return 0 + (( IS_DOCUMENTATION_PR )) && return 0 + header "Running integration tests" + local failed=0 + # Run pre-integration tests, if any + if function_exists pre_integration_tests; then + pre_integration_tests || failed=1 + fi + # Don't run integration tests if pre-integration tests failed + if (( ! failed )); then + if function_exists integration_tests; then + integration_tests || failed=1 + else + default_integration_test_runner || failed=1 + fi + fi + # Don't run integration tests if pre/integration tests failed + if (( ! failed )) && function_exists post_integration_tests; then + post_integration_tests || failed=1 + fi + results_banner "Integration" ${failed} + return ${failed} +} + +# Default integration test runner that runs all `test/e2e-*tests.sh`. +function default_integration_test_runner() { + local options="" + local failed=0 + (( EMIT_METRICS )) && options="--emit-metrics" + for e2e_test in $(find test/ -name e2e-*tests.sh); do + echo "Running integration test ${e2e_test}" + if ! ${e2e_test} ${options}; then + failed=1 + fi + done + return ${failed} +} + +# Options set by command-line flags. +RUN_BUILD_TESTS=0 +RUN_UNIT_TESTS=0 +RUN_INTEGRATION_TESTS=0 +EMIT_METRICS=0 + +# Process flags and run tests accordingly. +function main() { + initialize_environment + if (( IS_PRESUBMIT_EXEMPT_PR )) && (( ! IS_DOCUMENTATION_PR )); then + header "Commit only contains changes that don't require tests, skipping" + exit 0 + fi + + # Show the version of the tools we're using + if (( IS_PROW )); then + # Disable gcloud update notifications + gcloud config set component_manager/disable_update_check true + header "Current test setup" + echo ">> gcloud SDK version" + gcloud version + echo ">> kubectl version" + kubectl version --client + echo ">> go version" + go version + echo ">> git version" + git version + echo ">> bazel version" + bazel version 2> /dev/null + if [[ "${DOCKER_IN_DOCKER_ENABLED}" == "true" ]]; then + echo ">> docker version" + docker version + fi + fi + + if function_exists extra_initialization; then + extra_initialization + fi + + [[ -z $1 ]] && set -- "--all-tests" + + local TEST_TO_RUN="" + + while [[ $# -ne 0 ]]; do + local parameter=$1 + case ${parameter} in + --build-tests) RUN_BUILD_TESTS=1 ;; + --unit-tests) RUN_UNIT_TESTS=1 ;; + --integration-tests) RUN_INTEGRATION_TESTS=1 ;; + --emit-metrics) EMIT_METRICS=1 ;; + --all-tests) + RUN_BUILD_TESTS=1 + RUN_UNIT_TESTS=1 + RUN_INTEGRATION_TESTS=1 + ;; + --run-test) + shift + [[ $# -ge 1 ]] || abort "missing executable after --run-test" + TEST_TO_RUN=$1 + ;; + *) abort "error: unknown option ${parameter}" ;; + esac + shift + done + + readonly RUN_BUILD_TESTS + readonly RUN_UNIT_TESTS + readonly RUN_INTEGRATION_TESTS + readonly EMIT_METRICS + readonly TEST_TO_RUN + + cd ${REPO_ROOT_DIR} + + # Tests to be performed, in the right order if --all-tests is passed. + + local failed=0 + + if [[ -n "${TEST_TO_RUN}" ]]; then + if (( RUN_BUILD_TESTS || RUN_UNIT_TESTS || RUN_INTEGRATION_TESTS )); then + abort "--run-test must be used alone" + fi + # If this is a presubmit run, but a documentation-only PR, don't run the test + (( IS_PRESUBMIT && IS_DOCUMENTATION_PR )) && exit 0 + ${TEST_TO_RUN} || failed=1 + fi + + run_build_tests || failed=1 + # If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run unit tests if build tests failed + if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! 
failed )); then + run_unit_tests || failed=1 + fi + # If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run integration tests if build/unit tests failed + if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then + run_integration_tests || failed=1 + fi + + exit ${failed} +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0e91707f1cd..d2ba217f04f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -257,6 +257,7 @@ github.com/sirupsen/logrus github.com/spf13/pflag # github.com/tektoncd/plumbing v0.0.0-20191216083742-847dcf196de9 github.com/tektoncd/plumbing +github.com/tektoncd/plumbing/scripts # go.opencensus.io v0.22.1 go.opencensus.io go.opencensus.io/internal
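
With github.com/tektoncd/plumbing/scripts recorded in vendor/modules.txt, the hack scripts read library.sh straight from vendor/ and no longer touch go.mod through `go get -d`. A rough local check of that property, assuming a working Go toolchain (the exact commands are illustrative and not part of this patch):

    # Refresh vendor/; the blank import in test/dummy_test.go is what
    # keeps github.com/tektoncd/plumbing/scripts in the vendor folder.
    go mod vendor

    # verify-codegen.sh now sources the vendored library.sh, so running
    # it should not rewrite go.mod or go.sum as a side effect.
    ./hack/verify-codegen.sh

    # Expect no pending changes here afterwards.
    git status --porcelain go.mod go.sum vendor/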
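
The flag handling in main() above is what the repository's presubmit wrapper drives. Assuming the usual thin test/presubmit-tests.sh wrapper that sources this vendored file and forwards its arguments to main() (the wrapper itself is not part of this change, and the paths below are only examples), typical invocations would look like:

    # Run only the build and unit test phases.
    ./test/presubmit-tests.sh --build-tests --unit-tests

    # Run every phase; this is also the default when no flag is given.
    ./test/presubmit-tests.sh --all-tests

    # Run a single script in isolation; main() rejects combining
    # --run-test with any of the other phase flags.
    ./test/presubmit-tests.sh --run-test ./test/e2e-tests.sh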
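
The documentation-only and presubmit-exempt shortcuts rely on pr_only_contains, which rewrites a space-separated pattern list into a grep alternation with ${1// /\\|}. A minimal sketch of that expansion, using made-up file names that are not part of the vendored script:

    CHANGED_FILES=$'README.md\ndocs/install.md\nOWNERS'
    patterns="\.md OWNERS"
    # "${patterns// /\\|}" expands to "\.md\|OWNERS", so grep -v drops
    # every changed file ending in one of the listed patterns; an empty
    # result means the PR only touches exempt files.
    if [[ -z "$(echo "${CHANGED_FILES}" | grep -v "\(${patterns// /\\|}\)$")" ]]; then
      echo "presubmit-exempt change"
    fi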