diff --git a/config/config-feature-flags.yaml b/config/config-feature-flags.yaml index 0fd7c5ceb29..b57375c288e 100644 --- a/config/config-feature-flags.yaml +++ b/config/config-feature-flags.yaml @@ -78,3 +78,7 @@ data: # See https://github.com/tektoncd/pipeline/issues/2981 for more # info. require-git-ssh-secret-known-hosts: "false" + # Setting this flag to "true" enables the use of Tekton OCI bundles. + # This is an experimental feature and should still be considered + # an alpha feature. + enable-tekton-oci-bundles: "false" diff --git a/docs/install.md b/docs/install.md index 55a0400e05b..25c77f79c8d 100644 --- a/docs/install.md +++ b/docs/install.md @@ -264,7 +264,7 @@ data: ## Configuring self-signed cert for private registry -The `SSL_CERT_DIR` is set to `/etc/ssl/certs` as the default cert directory. If you are using a self-signed cert for private registry and the cert file is not under the default cert directory, configure your registry cert in the `config-registry-cert` `ConfigMap` with the key `cert`. +The `SSL_CERT_DIR` is set to `/etc/ssl/certs` as the default cert directory. If you are using a self-signed cert for private registry and the cert file is not under the default cert directory, configure your registry cert in the `config-registry-cert` `ConfigMap` with the key `cert`. ## Customizing basic execution parameters @@ -303,7 +303,7 @@ file lists the keys you can customize along with their default values. To customize the behavior of the Pipelines Controller, modify the ConfigMap `feature-flags` as follows: - `disable-affinity-assistant` - set this flag to `true` to disable the [Affinity Assistant](./workspaces.md#specifying-workspace-order-in-a-pipeline-and-affinity-assistants) - that is used to provide Node Affinity for `TaskRun` pods that share workspace volume. + that is used to provide Node Affinity for `TaskRun` pods that share workspace volume. The Affinity Assistant is incompatible with other affinity rules configured for `TaskRun` pods. @@ -326,7 +326,7 @@ for each `Step` that does not have its working directory explicitly set with `/w For more information, see the [associated issue](https://github.com/tektoncd/pipeline/issues/1836). - `running-in-environment-with-injected-sidecars`: set this flag to `"true"` to allow the -Tekton controller to set the `tekton.dev/ready` annotation at pod creation time for +Tekton controller to set the `tekton.dev/ready` annotation at pod creation time for TaskRuns with no Sidecars specified. Enabling this option should decrease the time it takes for a TaskRun to start running. However, for clusters that use injected sidecars e.g. istio enabling this option can lead to unexpected behavior. @@ -335,7 +335,15 @@ enabling this option can lead to unexpected behavior. Git SSH Secrets include a `known_hosts` field. This ensures that a git remote server's key is validated before data is accepted from it when authenticating over SSH. Secrets that don't include a `known_hosts` will result in the TaskRun failing validation and -not running. +not running. + +- `enable-tekton-oci-bundles`: set this flag to `"true"` to enable + Tekton OCI bundle usage (see [the Tekton bundle + contract](./tekton-bundle-contracts.md)). Enabling this option + allows the use of the `bundle` field in `taskRef` and `pipelineRef` for + `Pipeline`, `PipelineRun` and `TaskRun`. By default, this option is + disabled (`"false"`), which means the + `bundle` field is disallowed. 
For example: diff --git a/docs/pipelineruns.md b/docs/pipelineruns.md index d29282a55fb..c54ee3a8059 100644 --- a/docs/pipelineruns.md +++ b/docs/pipelineruns.md @@ -60,7 +60,7 @@ A `PipelineRun` definition supports the following fields: object that supplies specific execution credentials for the `Pipeline`. - [`serviceAccountNames`](#mapping-serviceaccount-credentials-to-tasks) - Maps specific `serviceAccountName` values to `Tasks` in the `Pipeline`. This overrides the credentials set for the entire `Pipeline`. - - [`taskRunSpec`](#specifying-task-run-specs) - Specifies a list of `PipelineRunTaskSpec` which allows for setting `ServiceAccountName` and [`Pod` template](./podtemplates.md) for each task. This overrides the `Pod` template set for the entire `Pipeline`. + - [`taskRunSpec`](#specifying-task-run-specs) - Specifies a list of `PipelineRunTaskSpec` which allows for setting `ServiceAccountName` and [`Pod` template](./podtemplates.md) for each task. This overrides the `Pod` template set for the entire `Pipeline`. - [`timeout`](#configuring-a-failure-timeout) - Specifies the timeout before the `PipelineRun` fails. - [`podTemplate`](#pod-template) - Specifies a [`Pod` template](./podtemplates.md) to use as the basis for the configuration of the `Pod` that executes each `Task`. @@ -70,7 +70,7 @@ A `PipelineRun` definition supports the following fields: ### Specifying the target `Pipeline` -You must specify the target `Pipeline` that you want the `PipelineRun` to execute, either by referencing +You must specify the target `Pipeline` that you want the `PipelineRun` to execute, either by referencing an existing `Pipeline` definition, or embedding a `Pipeline` definition directly in the `PipelineRun`. To specify the target `Pipeline` by reference, use the `pipelineRef` field: @@ -81,22 +81,6 @@ spec: name: mypipeline ``` - -You may also use a `Tekton Bundle` to reference a pipeline defined remotely. - - ```yaml - spec: - pipelineRef: - name: mypipeline - bundle: docker.io/myrepo/mycatalog:v1.0 - ``` - -The syntax and caveats are similar to using `Tekton Bundles` for `Task` references -in [Pipelines](pipelines.md#tekton-bundles) or [TaskRuns](taskruns.md#tekton-bundles). - -`Tekton Bundles` may be constructed with any toolsets that produce valid OCI image artifacts -so long as the artifact adheres to the [contract](tekton-bundle-contracts.md). - To embed a `Pipeline` definition in the `PipelineRun`, use the `pipelineSpec` field: ```yaml @@ -156,6 +140,27 @@ spec: ... ``` +#### Tekton Bundles + +**Note: This is only allowed if `enable-tekton-oci-bundles` is set to +`"true"` in the `feature-flags` configmap, see [`install.md`](./install.md#customizing-the-pipelines-controller-behavior)** + +You may also use a `Tekton Bundle` to reference a pipeline defined remotely. + + ```yaml + spec: + pipelineRef: + name: mypipeline + bundle: docker.io/myrepo/mycatalog:v1.0 + ``` + +The syntax and caveats are similar to using `Tekton Bundles` for `Task` references +in [Pipelines](pipelines.md#tekton-bundles) or [TaskRuns](taskruns.md#tekton-bundles). + +`Tekton Bundles` may be constructed with any toolsets that produce valid OCI image artifacts +so long as the artifact adheres to the [contract](tekton-bundle-contracts.md). + + ## Specifying `Resources` A `Pipeline` requires [`PipelineResources`](resources.md) to provide inputs and store outputs @@ -223,7 +228,7 @@ to all `persistentVolumeClaims` generated internally. 
You can specify `Parameters` that you want to pass to the `Pipeline` during execution, including different values of the same parameter for different `Tasks` in the `Pipeline`. -**Note:** You must specify all the `Parameters` that the `Pipeline` expects. Parameters +**Note:** You must specify all the `Parameters` that the `Pipeline` expects. Parameters that have default values specified in Pipeline are not required to be provided by PipelineRun. For example: @@ -236,14 +241,14 @@ spec: - name: pl-param-y value: "500" ``` -You can pass in extra `Parameters` if needed depending on your use cases. An example use -case is when your CI system autogenerates `PipelineRuns` and it has `Parameters` it wants to -provide to all `PipelineRuns`. Because you can pass in extra `Parameters`, you don't have to +You can pass in extra `Parameters` if needed depending on your use cases. An example use +case is when your CI system autogenerates `PipelineRuns` and it has `Parameters` it wants to +provide to all `PipelineRuns`. Because you can pass in extra `Parameters`, you don't have to go through the complexity of checking each `Pipeline` and providing only the required params. ### Specifying custom `ServiceAccount` credentials -You can execute the `Pipeline` in your `PipelineRun` with a specific set of credentials by +You can execute the `Pipeline` in your `PipelineRun` with a specific set of credentials by specifying a `ServiceAccount` object name in the `serviceAccountName` field in your `PipelineRun` definition. If you do not explicitly specify this, the `TaskRuns` created by your `PipelineRun` will execute with the credentials specified in the `configmap-defaults` `ConfigMap`. If this @@ -256,7 +261,7 @@ For more information, see [`ServiceAccount`](auth.md). If you require more granularity in specifying execution credentials, use the `serviceAccountNames` field to map a specific `serviceAccountName` value to a specific `Task` in the `Pipeline`. This overrides the global -`serviceAccountName` you may have set for the `Pipeline` as described in the previous section. +`serviceAccountName` you may have set for the `Pipeline` as described in the previous section. For example, if you specify these mappings: @@ -358,7 +363,7 @@ spec: disktype: ssd ``` -If used with this `Pipeline`, `build-task` will use the task specific `PodTemplate` (where `nodeSelector` has `disktype` equal to `ssd`). +If used with this `Pipeline`, `build-task` will use the task specific `PodTemplate` (where `nodeSelector` has `disktype` equal to `ssd`). ### Specifying `Workspaces` @@ -474,7 +479,7 @@ When a `PipelineRun` changes status, [events](events.md#pipelineruns) are trigge When a `PipelineRun` has `Tasks` with [WhenExpressions](pipelines.md#guard-task-execution-using-whenexpressions): - If the `WhenExpressions` evaluate to `true`, the `Task` is executed then the `TaskRun` and its resolved `WhenExpressions` will be listed in the `Task Runs` section of the `status` of the `PipelineRun`. -- If the `WhenExpressions` evaluate to `false`, the `Task` is skipped then its name and its resolved `WhenExpressions` will be listed in the `Skipped Tasks` section of the `status` of the `PipelineRun`. +- If the `WhenExpressions` evaluate to `false`, the `Task` is skipped then its name and its resolved `WhenExpressions` will be listed in the `Skipped Tasks` section of the `status` of the `PipelineRun`. 
```yaml Conditions: diff --git a/docs/pipelines.md b/docs/pipelines.md index e44962589e4..46ba85f6c17 100644 --- a/docs/pipelines.md +++ b/docs/pipelines.md @@ -45,7 +45,7 @@ A `Pipeline` definition supports the following fields: - [`metadata`][kubernetes-overview] - Specifies metadata that uniquely identifies the `Pipeline` object. For example, a `name`. - [`spec`][kubernetes-overview] - Specifies the configuration information for - this `Pipeline` object. This must include: + this `Pipeline` object. This must include: - [`tasks`](#adding-tasks-to-the-pipeline) - Specifies the `Tasks` that comprise the `Pipeline` and the details of their execution. - Optional: @@ -61,7 +61,7 @@ A `Pipeline` definition supports the following fields: execution of a `Task` after a failure. Does not apply to execution cancellations. - [`conditions`](#guard-task-execution-using-conditions) - Specifies `Conditions` that only allow a `Task` to execute if they successfully evaluate. - - [`timeout`](#configuring-the-failure-timeout) - Specifies the timeout before a `Task` fails. + - [`timeout`](#configuring-the-failure-timeout) - Specifies the timeout before a `Task` fails. - [`results`](#configuring-execution-results-at-the-pipeline-level) - Specifies the location to which the `Pipeline` emits its execution results. - [`description`](#adding-a-description) - Holds an informative description of the `Pipeline` object. @@ -136,7 +136,7 @@ varies throughout its execution. If no value is specified, the `type` field defa When the actual parameter value is supplied, its parsed type is validated against the `type` field. The `description` and `default` fields for a `Parameter` are optional. -The following example illustrates the use of `Parameters` in a `Pipeline`. +The following example illustrates the use of `Parameters` in a `Pipeline`. The following `Pipeline` declares an input parameter called `context` and passes its value to the `Task` to set the value of the `pathToContext` parameter within the `Task`. @@ -231,6 +231,9 @@ spec: ### Tekton Bundles +**Note: This is only allowed if `enable-tekton-oci-bundles` is set to +`"true"` in the `feature-flags` configmap, see [`install.md`](./install.md#customizing-the-pipelines-controller-behavior)** + You may also specify your `Task` reference using a `Tekton Bundle`. A `Tekton Bundle` is an OCI artifact that contains Tekton resources like `Tasks` which can be referenced within a `taskRef`. @@ -244,7 +247,7 @@ contains Tekton resources like `Tasks` which can be referenced within a `taskRef ``` Here, the `bundle` field is the full reference url to the artifact. The name is the -`metadata.name` field of the `Task`. +`metadata.name` field of the `Task`. You may also specify a `tag` as you would with a Docker image which will give you a fixed, repeatable reference to a `Task`. @@ -263,7 +266,7 @@ You may also specify a fixed digest instead of a tag. ```yaml spec: tasks: - - name: hello-world + - name: hello-world taskRef: name: echo-task bundle: docker.io/myrepo/mycatalog@sha256:abc123 @@ -282,14 +285,14 @@ so long as the artifact adheres to the [contract](tekton-bundle-contracts.md). If a `Task` in your `Pipeline` needs to use the output of a previous `Task` as its input, use the optional `from` parameter to specify a list of `Tasks` -that must execute **before** the `Task` that requires their outputs as its -input. When your target `Task` executes, only the version of the desired +that must execute **before** the `Task` that requires their outputs as its +input. 
When your target `Task` executes, only the version of the desired `PipelineResource` produced by the last `Task` in this list is used. The `name` of this output `PipelineResource` output must match the `name` of the -input `PipelineResource` specified in the `Task` that ingests it. +input `PipelineResource` specified in the `Task` that ingests it. In the example below, the `deploy-app` `Task` ingests the output of the `build-app` -`Task` named `my-image` as its input. Therefore, the `build-app` `Task` will +`Task` named `my-image` as its input. Therefore, the `build-app` `Task` will execute before the `deploy-app` `Task` regardless of the order in which those `Tasks` are declared in the `Pipeline`. @@ -377,11 +380,11 @@ The components of `WhenExpressions` are `Input`, `Operator` and `Values`: - `Operator` represents an `Input`'s relationship to a set of `Values`. A valid `Operator` must be provided, which can be either `in` or `notin`. - `Values` is an array of string values. The `Values` array must be provided and be non-empty. It can contain static values or variables ([`Parameters`](#specifying-parameters), [`Results`](#using-results) or [a Workspaces's `bound` state](#specifying-workspaces)). -The [`Parameters`](#specifying-parameters) are read from the `Pipeline` and [`Results`](#using-results) are read directly from previous [`Tasks`](#adding-tasks-to-the-pipeline). Using [`Results`](#using-results) in a `WhenExpression` in a guarded `Task` introduces a resource dependency on the previous `Task` that produced the `Result`. +The [`Parameters`](#specifying-parameters) are read from the `Pipeline` and [`Results`](#using-results) are read directly from previous [`Tasks`](#adding-tasks-to-the-pipeline). Using [`Results`](#using-results) in a `WhenExpression` in a guarded `Task` introduces a resource dependency on the previous `Task` that produced the `Result`. -The declared `WhenExpressions` are evaluated before the `Task` is run. If all the `WhenExpressions` evaluate to `True`, the `Task` is run. If any of the `WhenExpressions` evaluate to `False`, the `Task` is not run and the `Task` is listed in the [`Skipped Tasks` section of the `PipelineRunStatus`](pipelineruns.md#monitoring-execution-status). +The declared `WhenExpressions` are evaluated before the `Task` is run. If all the `WhenExpressions` evaluate to `True`, the `Task` is run. If any of the `WhenExpressions` evaluate to `False`, the `Task` is not run and the `Task` is listed in the [`Skipped Tasks` section of the `PipelineRunStatus`](pipelineruns.md#monitoring-execution-status). -In these examples, `first-create-file` task will only be executed if the `path` parameter is `README.md`, `echo-file-exists` task will only be executed if the `exists` result from `check-file` task is `yes` and `run-lint` task will only be executed if the `lint-config` optional workspace has been provided by a PipelineRun. +In these examples, `first-create-file` task will only be executed if the `path` parameter is `README.md`, `echo-file-exists` task will only be executed if the `exists` result from `check-file` task is `yes` and `run-lint` task will only be executed if the `lint-config` optional workspace has been provided by a PipelineRun. ```yaml tasks: @@ -426,7 +429,7 @@ There are a lot of scenarios where `WhenExpressions` can be really useful. Some ### Guard `Task` execution using `Conditions` -**Note:** `Conditions` are deprecated, use [`WhenExpressions`](#guard-task-execution-using-whenexpressions) instead. 
+**Note:** `Conditions` are deprecated, use [`WhenExpressions`](#guard-task-execution-using-whenexpressions) instead. To run a `Task` only when certain conditions are met, it is possible to _guard_ task execution using the `conditions` field. The `conditions` field allows you to list a series of references to @@ -450,17 +453,17 @@ tasks: name: deploy ``` -Unlike regular task failures, condition failures do not automatically fail the entire `PipelineRun` -- +Unlike regular task failures, condition failures do not automatically fail the entire `PipelineRun` -- other tasks that are **not dependent** on the `Task` (via `from` or `runAfter`) are still run. In this example, `(task C)` has a `condition` set to _guard_ its execution. If the condition is **not** successfully evaluated, task `(task D)` will not be run, but all other tasks in the pipeline that not depend on `(task C)` will be executed and the `PipelineRun` will successfully complete. - + ``` (task B) — (task E) - / - (task A) + / + (task A) \ (guarded task C) — (task D) ``` @@ -495,7 +498,7 @@ tasks: You can use the `Timeout` field in the `Task` spec within the `Pipeline` to set the timeout of the `TaskRun` that executes that `Task` within the `PipelineRun` that executes your `Pipeline.` The `Timeout` value is a `duration` conforming to Go's [`ParseDuration`](https://golang.org/pkg/time/#ParseDuration) -format. For example, valid values are `1h30m`, `1h`, `1m`, and `60s`. +format. For example, valid values are `1h30m`, `1h`, `1m`, and `60s`. **Note:** If you do not specify a `Timeout` value, Tekton instead honors the timeout for the [`PipelineRun`](pipelineruns.md#configuring-a-pipelinerun). @@ -528,7 +531,7 @@ a `Result` and another receives it as a `Parameter` with a variable such as When one `Task` receives the `Results` of another, there is a dependency created between those two `Tasks`. In order for the receiving `Task` to get data from another `Task's` `Result`, the `Task` producing the `Result` must run first. Tekton enforces this `Task` ordering -by ensuring that the `Task` emitting the `Result` executes before any `Task` that uses it. +by ensuring that the `Task` emitting the `Result` executes before any `Task` that uses it. In the snippet below, a param is provided its value from the `commit` `Result` emitted by the `checkout-source` `Task`. Tekton will make sure that the `checkout-source` `Task` runs diff --git a/docs/taskruns.md b/docs/taskruns.md index 3970cd973a8..7d2d4142a6d 100644 --- a/docs/taskruns.md +++ b/docs/taskruns.md @@ -95,10 +95,13 @@ spec: - /kaniko/executor args: - --destination=gcr.io/my-project/gohelloworld ``` #### Tekton Bundles +**Note: This is only allowed if `enable-tekton-oci-bundles` is set to +`"true"` in the `feature-flags` configmap, see [`install.md`](./install.md#customizing-the-pipelines-controller-behavior)** + You may also reference `Tasks` that are defined outside of your cluster using `Tekton Bundles`. A `Tekton Bundle` is an OCI artifact that contains Tekton resources like `Tasks` which can be referenced within a `taskRef`. @@ -111,7 +114,7 @@ taskRef: ``` Here, the `bundle` field is the full reference url to the artifact. The name is the -`metadata.name` field of the `Task`. +`metadata.name` field of the `Task`. You may also specify a `tag` as you would with a Docker image which will give you a repeatable reference to a `Task`. @@ -134,7 +137,7 @@ taskRef: A working example can be found [here](../examples/v1beta1/taskruns/tekton-bundles.yaml). 
Any of the above options will fetch the image using the `ImagePullSecrets` attached to the -`ServiceAccount` specified in the `TaskRun`. See the [Service Account](#service-account) +`ServiceAccount` specified in the `TaskRun`. See the [Service Account](#service-account) section for details on how to configure a `ServiceAccount` on a `TaskRun`. The `TaskRun` will then run that `Task` without registering it in the cluster allowing multiple versions of the same named `Task` to be run at once. @@ -268,8 +271,8 @@ that updates files on a shared volume, or a network proxy. Tekton supports the injection of `Sidecars` into a `Pod` belonging to a `TaskRun` with the condition that each `Sidecar` running inside the -`Pod` are terminated as soon as all `Steps` in the `Task` complete execution. -This might result in the `Pod` including each affected `Sidecar` with a +`Pod` are terminated as soon as all `Steps` in the `Task` complete execution. +This might result in the `Pod` including each affected `Sidecar` with a retry count of 1 and a different container image than expected. We are aware of the following issues affecting Tekton's implementation of `Sidecars`: @@ -301,27 +304,27 @@ For more information, see the [`LimitRange` code example](../examples/v1beta1/ta ## Configuring the failure timeout -You can use the `timeout` field to set the `TaskRun's` desired timeout value. If you do not specify this -value for the `TaskRun`, the global default timeout value applies. If you set the timeout to 0, the `TaskRun` will +You can use the `timeout` field to set the `TaskRun's` desired timeout value. If you do not specify this +value for the `TaskRun`, the global default timeout value applies. If you set the timeout to 0, the `TaskRun` will have no timeout and will run until it completes successfully or fails from an error. The global default timeout is set to 60 minutes when you first install Tekton. You can set a different global default timeout value using the `default-timeout-minutes` field in -[`config/config-defaults.yaml`](./../config/config-defaults.yaml). If you set the global timeout to 0, -all `TaskRuns` that do not have a timeout set will have no timeout and will run until it completes successfully +[`config/config-defaults.yaml`](./../config/config-defaults.yaml). If you set the global timeout to 0, +all `TaskRuns` that do not have a timeout set will have no timeout and will run until it completes successfully or fails from an error. The `timeout` value is a `duration` conforming to Go's [`ParseDuration`](https://golang.org/pkg/time/#ParseDuration) format. For example, valid -values are `1h30m`, `1h`, `1m`, `60s`, and `0`. +values are `1h30m`, `1h`, `1m`, `60s`, and `0`. -If a `TaskRun` runs longer than its timeout value, the pod associated with the `TaskRun` will be deleted. This -means that the logs of the `TaskRun` are not preserved. The deletion of the `TaskRun` pod is necessary in order to -stop `TaskRun` step containers from running. +If a `TaskRun` runs longer than its timeout value, the pod associated with the `TaskRun` will be deleted. This +means that the logs of the `TaskRun` are not preserved. The deletion of the `TaskRun` pod is necessary in order to +stop `TaskRun` step containers from running. 
### Specifying `ServiceAccount` credentials -You can execute the `Task` in your `TaskRun` with a specific set of credentials by +You can execute the `Task` in your `TaskRun` with a specific set of credentials by specifying a `ServiceAccount` object name in the `serviceAccountName` field in your `TaskRun` definition. If you do not explicitly specify this, the `TaskRun` executes with the credentials specified in the `configmap-defaults` `ConfigMap`. If this default is not specified, `TaskRuns` @@ -419,11 +422,11 @@ Status: ## Cancelling a `TaskRun` -To cancel a `TaskRun` that's currently executing, update its status to mark it as cancelled. +To cancel a `TaskRun` that's currently executing, update its status to mark it as cancelled. -When you cancel a TaskRun, the running pod associated with that `TaskRun` is deleted. This -means that the logs of the `TaskRun` are not preserved. The deletion of the `TaskRun` pod is necessary -in order to stop `TaskRun` step containers from running. +When you cancel a TaskRun, the running pod associated with that `TaskRun` is deleted. This +means that the logs of the `TaskRun` are not preserved. The deletion of the `TaskRun` pod is necessary +in order to stop `TaskRun` step containers from running. Example of cancelling a `TaskRun`: @@ -711,14 +714,14 @@ data: ### Running Step Containers as a Non Root User -All steps that do not require to be run as a root user should make use of TaskRun features to -designate the container for a step runs as a user without root permissions. As a best practice, -running containers as non root should be built into the container image to avoid any possibility -of the container being run as root. However, as a further measure of enforcing this practice, -TaskRun pod templates can be used to specify how containers should be run within a TaskRun pod. +All steps that do not require to be run as a root user should make use of TaskRun features to +designate the container for a step runs as a user without root permissions. As a best practice, +running containers as non root should be built into the container image to avoid any possibility +of the container being run as root. However, as a further measure of enforcing this practice, +TaskRun pod templates can be used to specify how containers should be run within a TaskRun pod. -An example of using a TaskRun pod template is shown below to specify that containers running via this -TaskRun's pod should run as non root and run as user 1001 if the container itself does not specify what +An example of using a TaskRun pod template is shown below to specify that containers running via this +TaskRun's pod should run as non root and run as user 1001 if the container itself does not specify what user to run as: ```yaml @@ -735,8 +738,8 @@ spec: runAsUser: 1001 ``` -If a Task step specifies that it is to run as a different user than what is specified in the pod template, -the step's `securityContext` will be applied instead of what is specified at the pod level. An example of +If a Task step specifies that it is to run as a different user than what is specified in the pod template, +the step's `securityContext` will be applied instead of what is specified at the pod level. An example of this is available as a [TaskRun example](../examples/v1beta1/taskruns/run-steps-as-non-root.yaml). More information about Pod and Container Security Contexts can be found via the [Kubernetes website](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod). 
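Reviewer note (not part of the patch): the `docs/taskruns.md` hunks above document referencing a remote `Task` through a `Tekton Bundle` via the `bundle` field of `taskRef`, gated by the new `enable-tekton-oci-bundles` flag. As a quick illustration of the API shape being documented, here is a minimal Go sketch that builds the equivalent `TaskRun` with the `v1beta1` types touched in this diff; the task name and image reference are placeholders taken from the docs.

```go
package main

import (
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A TaskRun whose taskRef points at a Task stored inside an OCI image
	// ("Tekton Bundle"). The controller only honors the Bundle field when
	// enable-tekton-oci-bundles is "true" in the feature-flags ConfigMap.
	tr := v1beta1.TaskRun{
		ObjectMeta: metav1.ObjectMeta{Name: "remote-task-reference"},
		Spec: v1beta1.TaskRunSpec{
			TaskRef: &v1beta1.TaskRef{
				// metadata.name of the Task inside the bundle.
				Name: "hello-world",
				// Placeholder image reference; any OCI repository works as
				// long as the artifact follows the Tekton bundle contract.
				Bundle: "docker.io/myrepo/mycatalog:v1.0",
			},
		},
	}
	fmt.Printf("run task %q from bundle %s\n", tr.Spec.TaskRef.Name, tr.Spec.TaskRef.Bundle)
}
```

This mirrors the YAML example removed from `examples/v1beta1/taskruns/tekton-bundles.yaml` in the next hunk.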
diff --git a/examples/v1beta1/taskruns/tekton-bundles.yaml b/examples/v1beta1/taskruns/tekton-bundles.yaml deleted file mode 100644 index ac659ad8520..00000000000 --- a/examples/v1beta1/taskruns/tekton-bundles.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# TODO: Move the example image to a tekton owned repo. -apiVersion: tekton.dev/v1beta1 -kind: TaskRun -metadata: - name: remote-task-reference -spec: - taskRef: - name: hello-world - bundle: docker.io/ptasci67/example-oci@sha256:053a6cb9f3711d4527dd0d37ac610e8727ec0288a898d5dfbd79b25bcaa29828 diff --git a/pkg/apis/config/feature_flags.go b/pkg/apis/config/feature_flags.go index ee0f898ab31..b1d18201b9f 100644 --- a/pkg/apis/config/feature_flags.go +++ b/pkg/apis/config/feature_flags.go @@ -31,12 +31,14 @@ const ( disableCredsInitKey = "disable-creds-init" runningInEnvWithInjectedSidecarsKey = "running-in-environment-with-injected-sidecars" requireGitSSHSecretKnownHostsKey = "require-git-ssh-secret-known-hosts" // nolint: gosec + enableTektonOCIBundles = "enable-tekton-oci-bundles" DefaultDisableHomeEnvOverwrite = false DefaultDisableWorkingDirOverwrite = false DefaultDisableAffinityAssistant = false DefaultDisableCredsInit = false DefaultRunningInEnvWithInjectedSidecars = true DefaultRequireGitSSHSecretKnownHosts = false + DefaultEnableTektonOciBundles = false ) // FeatureFlags holds the features configurations @@ -48,6 +50,7 @@ type FeatureFlags struct { DisableCredsInit bool RunningInEnvWithInjectedSidecars bool RequireGitSSHSecretKnownHosts bool + EnableTektonOCIBundles bool } // GetFeatureFlagsConfigName returns the name of the configmap containing all @@ -93,6 +96,9 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if err := setFeature(requireGitSSHSecretKnownHostsKey, DefaultRequireGitSSHSecretKnownHosts, &tc.RequireGitSSHSecretKnownHosts); err != nil { return nil, err } + if err := setFeature(enableTektonOCIBundles, DefaultEnableTektonOciBundles, &tc.EnableTektonOCIBundles); err != nil { + return nil, err + } return &tc, nil } diff --git a/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go b/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go index 2d74d0ed5c3..2c6c54a41d2 100644 --- a/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go +++ b/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go @@ -21,6 +21,7 @@ import ( "fmt" "github.com/google/go-containerregistry/pkg/name" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" "knative.dev/pkg/apis" ) @@ -35,6 +36,7 @@ func (pr *PipelineRun) Validate(ctx context.Context) *apis.FieldError { // Validate pipelinerun spec func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) { + cfg := config.FromContextOrDefaults(ctx) // can't have both pipelineRef and pipelineSpec at the same time if (ps.PipelineRef != nil && ps.PipelineRef.Name != "") && ps.PipelineSpec != nil { errs = errs.Also(apis.ErrDisallowedFields("pipelineref", "pipelinespec")) @@ -45,16 +47,22 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) errs = errs.Also(apis.ErrMissingField("pipelineref.name", "pipelinespec")) } - // Check that if a bundle is specified, that a PipelineRef is specified as well. - if (ps.PipelineRef != nil && ps.PipelineRef.Bundle != "") && ps.PipelineRef.Name == "" { - errs = errs.Also(apis.ErrMissingField("pipelineref.name")) - } + // If EnableTektonOCIBundles feature flag is on validate it. 
+ // Otherwise, fail if it is present (as it won't be allowed nor used) + if cfg.FeatureFlags.EnableTektonOCIBundles { + // Check that if a bundle is specified, that a PipelineRef is specified as well. + if (ps.PipelineRef != nil && ps.PipelineRef.Bundle != "") && ps.PipelineRef.Name == "" { + errs = errs.Also(apis.ErrMissingField("pipelineref.name")) + } - // If a bundle url is specified, ensure it is parseable. - if ps.PipelineRef != nil && ps.PipelineRef.Bundle != "" { - if _, err := name.ParseReference(ps.PipelineRef.Bundle); err != nil { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("invalid bundle reference (%s)", err.Error()), "pipelineref.bundle")) + // If a bundle url is specified, ensure it is parseable. + if ps.PipelineRef != nil && ps.PipelineRef.Bundle != "" { + if _, err := name.ParseReference(ps.PipelineRef.Bundle); err != nil { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("invalid bundle reference (%s)", err.Error()), "pipelineref.bundle")) + } } + } else if ps.PipelineRef != nil && ps.PipelineRef.Bundle != "" { + errs = errs.Also(apis.ErrDisallowedFields("pipelineref.bundle")) } // Validate PipelineSpec if it's present diff --git a/pkg/apis/pipeline/v1beta1/pipelinerun_validation_test.go b/pkg/apis/pipeline/v1beta1/pipelinerun_validation_test.go index 8f52f230075..5472fff9a88 100644 --- a/pkg/apis/pipeline/v1beta1/pipelinerun_validation_test.go +++ b/pkg/apis/pipeline/v1beta1/pipelinerun_validation_test.go @@ -22,11 +22,13 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/test/diff" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" + logtesting "knative.dev/pkg/logging/testing" ) func TestPipelineRun_Invalidate(t *testing.T) { @@ -34,6 +36,7 @@ func TestPipelineRun_Invalidate(t *testing.T) { name string pr v1beta1.PipelineRun want *apis.FieldError + wc func(context.Context) context.Context }{ { name: "invalid pipelinerun metadata", @@ -90,6 +93,20 @@ func TestPipelineRun_Invalidate(t *testing.T) { }, }, want: apis.ErrInvalidValue("PipelineRunCancell should be PipelineRunCancelled", "spec.status"), + }, { + name: "use of bundle without the feature flag set", + pr: v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pipelinelineName", + }, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{ + Name: "my-pipeline", + Bundle: "not a valid reference", + }, + }, + }, + want: apis.ErrDisallowedFields("spec.pipelineref.bundle"), }, { name: "bundle missing name", pr: v1beta1.PipelineRun{ @@ -104,6 +121,7 @@ func TestPipelineRun_Invalidate(t *testing.T) { }, }, want: apis.ErrMissingField("spec.pipelineref.name"), + wc: enableTektonOCIBundles(t), }, { name: "invalid bundle reference", pr: v1beta1.PipelineRun{ @@ -118,14 +136,19 @@ func TestPipelineRun_Invalidate(t *testing.T) { }, }, want: apis.ErrInvalidValue("invalid bundle reference (could not parse reference: not a valid reference)", "spec.pipelineref.bundle"), + wc: enableTektonOCIBundles(t), }, } - for _, ps := range tests { - t.Run(ps.name, func(t *testing.T) { - err := ps.pr.Validate(context.Background()) - if d := cmp.Diff(err.Error(), ps.want.Error()); d != "" { - t.Errorf("PipelineRun.Validate/%s %s", ps.name, diff.PrintWantGot(d)) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + if tc.wc != nil { + ctx = tc.wc(ctx) + } + err := tc.pr.Validate(ctx) + if 
d := cmp.Diff(err.Error(), tc.want.Error()); d != "" { + t.Errorf("PipelineRun.Validate/%s %s", tc.name, diff.PrintWantGot(d)) } }) } @@ -309,3 +332,16 @@ func TestPipelineRunSpec_Validate(t *testing.T) { }) } } + +func enableTektonOCIBundles(t *testing.T) func(context.Context) context.Context { + return func(ctx context.Context) context.Context { + s := config.NewStore(logtesting.TestLogger(t)) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName()}, + Data: map[string]string{ + "enable-tekton-oci-bundles": "true", + }, + }) + return s.ToContext(ctx) + } +} diff --git a/pkg/apis/pipeline/v1beta1/substitution.go b/pkg/apis/pipeline/v1beta1/substitution.go index e806b988a44..1b0a8880a5c 100644 --- a/pkg/apis/pipeline/v1beta1/substitution.go +++ b/pkg/apis/pipeline/v1beta1/substitution.go @@ -115,7 +115,7 @@ func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string { func ApplyReplacements(in string, replacements map[string]string) string { for k, v := range replacements { - in = strings.Replace(in, fmt.Sprintf("$(%s)", k), v, -1) + in = strings.ReplaceAll(in, fmt.Sprintf("$(%s)", k), v) } return in } diff --git a/pkg/reconciler/pipelinerun/pipelinerun.go b/pkg/reconciler/pipelinerun/pipelinerun.go index f82203b52d0..2fd2e25496b 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun.go +++ b/pkg/reconciler/pipelinerun/pipelinerun.go @@ -163,7 +163,7 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, pr *v1beta1.PipelineRun) before = pr.Status.GetCondition(apis.ConditionSucceeded) } - getPipelineFunc, err := resources.GetPipelineFunc(c.KubeClientSet, c.PipelineClientSet, pr.Spec.PipelineRef, pr.Namespace, pr.Spec.ServiceAccountName) + getPipelineFunc, err := resources.GetPipelineFunc(ctx, c.KubeClientSet, c.PipelineClientSet, pr) if err != nil { logger.Errorf("Failed to fetch pipeline func for pipeline %s: %w", pr.Spec.PipelineRef.Name, err) pr.Status.MarkFailed(ReasonCouldntGetPipeline, "Error retrieving pipeline for pipelinerun %s/%s: %s", @@ -256,7 +256,7 @@ func (c *Reconciler) resolvePipelineState( pst := resources.PipelineRunState{} // Resolve each task individually because they each could have a different reference context (remote or local). 
for _, task := range tasks { - fn, _, err := tresources.GetTaskFunc(c.KubeClientSet, c.PipelineClientSet, task.TaskRef, pr.Namespace, pr.Spec.ServiceAccountName) + fn, _, err := tresources.GetTaskFunc(ctx, c.KubeClientSet, c.PipelineClientSet, task.TaskRef, pr.Namespace, pr.Spec.ServiceAccountName) if err != nil { // This Run has failed, so we need to mark it as failed and stop reconciling it pr.Status.MarkFailed(ReasonCouldntGetTask, "Pipeline %s/%s can't be Run; task %s could not be fetched: %s", diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go index 34cde46897b..1a0ce877505 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -59,6 +59,7 @@ import ( "knative.dev/pkg/configmap" "knative.dev/pkg/controller" "knative.dev/pkg/logging" + logtesting "knative.dev/pkg/logging/testing" "knative.dev/pkg/reconciler" ) @@ -4048,6 +4049,16 @@ func (prt PipelineRunTest) reconcileRun(namespace, pipelineRunName string, wantE func TestReconcile_RemotePipelineRef(t *testing.T) { names.TestingSeed() + ctx := context.Background() + cfg := config.NewStore(logtesting.TestLogger(t)) + cfg.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName()}, + Data: map[string]string{ + "enable-tekton-oci-bundles": "true", + }, + }) + ctx = cfg.ToContext(ctx) + // Set up a fake registry to push an image to. s := httptest.NewServer(registry.New()) defer s.Close() @@ -4076,6 +4087,14 @@ func TestReconcile_RemotePipelineRef(t *testing.T) { }, }, } + cms := []*corev1.ConfigMap{ + { + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.GetNamespace()}, + Data: map[string]string{ + "enable-tekton-oci-bundles": "true", + }, + }, + } // This task will be uploaded along with the pipeline definition. remoteTask := tb.Task("unit-test-task", tb.TaskType, tb.TaskSpec(), tb.TaskNamespace("foo")) @@ -4087,13 +4106,11 @@ func TestReconcile_RemotePipelineRef(t *testing.T) { // Unlike the tests above, we do *not* locally define our pipeline or unit-test task. d := test.Data{ - PipelineRuns: prs, - Tasks: []*v1beta1.Task{}, - ClusterTasks: []*v1beta1.ClusterTask{}, - PipelineResources: []*v1alpha1.PipelineResource{}, + PipelineRuns: prs, ServiceAccounts: []*corev1.ServiceAccount{{ ObjectMeta: metav1.ObjectMeta{Name: prs[0].Spec.ServiceAccountName, Namespace: "foo"}, }}, + ConfigMaps: cms, } prt := NewPipelineRunTest(d, t) diff --git a/pkg/reconciler/pipelinerun/resources/pipelineref.go b/pkg/reconciler/pipelinerun/resources/pipelineref.go index b6080649839..08507ccbd28 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelineref.go +++ b/pkg/reconciler/pipelinerun/resources/pipelineref.go @@ -21,6 +21,7 @@ import ( "fmt" "github.com/google/go-containerregistry/pkg/authn/k8schain" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" @@ -32,16 +33,19 @@ import ( // GetPipelineFunc is a factory function that will use the given PipelineRef to return a valid GetPipeline function that // looks up the pipeline. It uses as context a k8s client, tekton client, namespace, and service account name to return // the pipeline. It knows whether it needs to look in the cluster or in a remote image to fetch the reference. 
-func GetPipelineFunc(k8s kubernetes.Interface, tekton clientset.Interface, pr *v1beta1.PipelineRef, namespace, saName string) (GetPipeline, error) { +func GetPipelineFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset.Interface, pipelineRun *v1beta1.PipelineRun) (GetPipeline, error) { + cfg := config.FromContextOrDefaults(ctx) + pr := pipelineRun.Spec.PipelineRef + namespace := pipelineRun.Namespace switch { - case pr != nil && pr.Bundle != "": + case cfg.FeatureFlags.EnableTektonOCIBundles && pr != nil && pr.Bundle != "": // Return an inline function that implements GetTask by calling Resolver.Get with the specified task type and // casting it to a PipelineInterface. return func(ctx context.Context, name string) (v1beta1.PipelineInterface, error) { // If there is a bundle url at all, construct an OCI resolver to fetch the pipeline. kc, err := k8schain.New(ctx, k8s, k8schain.Options{ Namespace: namespace, - ServiceAccountName: saName, + ServiceAccountName: pipelineRun.Spec.ServiceAccountName, }) if err != nil { return nil, fmt.Errorf("failed to get keychain: %w", err) @@ -58,7 +62,7 @@ func GetPipelineFunc(k8s kubernetes.Interface, tekton clientset.Interface, pr *v if pipeline, ok := obj.(*v1alpha1.Pipeline); ok { betaPipeline := &v1beta1.Pipeline{} - err := pipeline.ConvertTo(context.Background(), betaPipeline) + err := pipeline.ConvertTo(ctx, betaPipeline) return betaPipeline, err } diff --git a/pkg/reconciler/pipelinerun/resources/pipelineref_test.go b/pkg/reconciler/pipelinerun/resources/pipelineref_test.go index 30ff4333f20..90eb7638311 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelineref_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelineref_test.go @@ -25,15 +25,18 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-containerregistry/pkg/registry" tb "github.com/tektoncd/pipeline/internal/builder/v1beta1" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" "github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources" "github.com/tektoncd/pipeline/test" "github.com/tektoncd/pipeline/test/diff" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" fakek8s "k8s.io/client-go/kubernetes/fake" + logtesting "knative.dev/pkg/logging/testing" ) var ( @@ -104,6 +107,16 @@ func TestGetPipelineFunc(t *testing.T) { t.Fatal(err) } + ctx := context.Background() + cfg := config.NewStore(logtesting.TestLogger(t)) + cfg.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName()}, + Data: map[string]string{ + "enable-tekton-oci-bundles": "true", + }, + }) + ctx = cfg.ToContext(ctx) + testcases := []struct { name string localPipelines []runtime.Object @@ -151,7 +164,13 @@ func TestGetPipelineFunc(t *testing.T) { t.Fatalf("failed to upload test image: %s", err.Error()) } - fn, err := resources.GetPipelineFunc(kubeclient, tektonclient, tc.ref, "default", "default") + fn, err := resources.GetPipelineFunc(ctx, kubeclient, tektonclient, &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Namespace: "default"}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: tc.ref, + ServiceAccountName: "default", + }, + }) if err != nil { t.Fatalf("failed to get pipeline fn: %s", err.Error()) } diff --git a/pkg/reconciler/taskrun/resources/taskref.go b/pkg/reconciler/taskrun/resources/taskref.go index 
3d71dcc22f6..5ef84f60d98 100644 --- a/pkg/reconciler/taskrun/resources/taskref.go +++ b/pkg/reconciler/taskrun/resources/taskref.go @@ -22,6 +22,7 @@ import ( "strings" "github.com/google/go-containerregistry/pkg/authn/k8schain" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" @@ -34,14 +35,15 @@ import ( // also requires a kubeclient, tektonclient, namespace, and service account in case it needs to find that task in // cluster or authorize against an external repositroy. It will figure out whether it needs to look in the cluster or in // a remote image to fetch the reference. It will also return the "kind" of the task being referenced. -func GetTaskFunc(k8s kubernetes.Interface, tekton clientset.Interface, tr *v1beta1.TaskRef, namespace, saName string) (GetTask, v1beta1.TaskKind, error) { +func GetTaskFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset.Interface, tr *v1beta1.TaskRef, namespace, saName string) (GetTask, v1beta1.TaskKind, error) { + cfg := config.FromContextOrDefaults(ctx) kind := v1alpha1.NamespacedTaskKind if tr != nil && tr.Kind != "" { kind = tr.Kind } switch { - case tr != nil && tr.Bundle != "": + case cfg.FeatureFlags.EnableTektonOCIBundles && tr != nil && tr.Bundle != "": // Return an inline function that implements GetTask by calling Resolver.Get with the specified task type and // casting it to a TaskInterface. return func(ctx context.Context, name string) (v1beta1.TaskInterface, error) { @@ -73,11 +75,11 @@ func GetTaskFunc(k8s kubernetes.Interface, tekton clientset.Interface, tr *v1bet switch tt := obj.(type) { case *v1alpha1.Task: betaTask := &v1beta1.Task{} - err := tt.ConvertTo(context.Background(), betaTask) + err := tt.ConvertTo(ctx, betaTask) return betaTask, err case *v1alpha1.ClusterTask: betaTask := &v1beta1.ClusterTask{} - err := tt.ConvertTo(context.Background(), betaTask) + err := tt.ConvertTo(ctx, betaTask) return betaTask, err } diff --git a/pkg/reconciler/taskrun/resources/taskref_test.go b/pkg/reconciler/taskrun/resources/taskref_test.go index cc4dfbb3d80..582643dde76 100644 --- a/pkg/reconciler/taskrun/resources/taskref_test.go +++ b/pkg/reconciler/taskrun/resources/taskref_test.go @@ -25,16 +25,19 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-containerregistry/pkg/registry" tb "github.com/tektoncd/pipeline/internal/builder/v1beta1" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources" "github.com/tektoncd/pipeline/test" "github.com/tektoncd/pipeline/test/diff" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" fakek8s "k8s.io/client-go/kubernetes/fake" + logtesting "knative.dev/pkg/logging/testing" ) func TestLocalTaskRef(t *testing.T) { @@ -117,6 +120,16 @@ func TestGetTaskFunc(t *testing.T) { t.Fatal(err) } + ctx := context.Background() + cfg := config.NewStore(logtesting.TestLogger(t)) + cfg.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName()}, + Data: map[string]string{ + "enable-tekton-oci-bundles": "true", + }, + }) + ctx = cfg.ToContext(ctx) + 
testcases := []struct { name string localTasks []runtime.Object @@ -213,7 +226,7 @@ func TestGetTaskFunc(t *testing.T) { t.Fatalf("failed to upload test image: %s", err.Error()) } - fn, kind, err := resources.GetTaskFunc(kubeclient, tektonclient, tc.ref, "default", "default") + fn, kind, err := resources.GetTaskFunc(ctx, kubeclient, tektonclient, tc.ref, "default", "default") if err != nil { t.Fatalf("failed to get task fn: %s", err.Error()) } @@ -222,7 +235,7 @@ func TestGetTaskFunc(t *testing.T) { t.Errorf("expected kind %s did not match actual kind %s", tc.expectedKind, kind) } - task, err := fn(context.Background(), tc.ref.Name) + task, err := fn(ctx, tc.ref.Name) if err != nil { t.Fatalf("failed to call taskfn: %s", err.Error()) } diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index d158eb53d68..afbb0178b1c 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -253,7 +253,7 @@ func (c *Reconciler) prepare(ctx context.Context, tr *v1beta1.TaskRun) (*v1beta1 // and may not have had all of the assumed default specified. tr.SetDefaults(contexts.WithUpgradeViaDefaulting(ctx)) - getTaskfunc, kind, err := resources.GetTaskFunc(c.KubeClientSet, c.PipelineClientSet, tr.Spec.TaskRef, tr.Namespace, tr.Spec.ServiceAccountName) + getTaskfunc, kind, err := resources.GetTaskFunc(ctx, c.KubeClientSet, c.PipelineClientSet, tr.Spec.TaskRef, tr.Namespace, tr.Spec.ServiceAccountName) if err != nil { logger.Errorf("Failed to fetch task reference %s: %v", tr.Spec.TaskRef.Name) tr.Status.SetCondition(&apis.Condition{ diff --git a/test/tektonbundles_test.go b/test/tektonbundles_test.go index d0c21888f8e..31bb6a44c64 100644 --- a/test/tektonbundles_test.go +++ b/test/tektonbundles_test.go @@ -37,153 +37,21 @@ import ( "github.com/google/go-containerregistry/pkg/v1/layout" "github.com/google/go-containerregistry/pkg/v1/mutate" "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/pod" + "github.com/tektoncd/pipeline/pkg/system" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" knativetest "knative.dev/pkg/test" ) -func tarImageInOCIFormat(namespace string, img v1.Image) ([]byte, error) { - // Write the image in the OCI layout and then tar it up. - dir, err := ioutil.TempDir(os.TempDir(), namespace) - if err != nil { - return nil, err - } - - p, err := layout.Write(dir, empty.Index) - if err != nil { - return nil, err - } - - if err := p.AppendImage(img); err != nil { - return nil, err - } - - // Now that the layout is correct, package this up as a tarball. - var buf bytes.Buffer - tw := tar.NewWriter(&buf) - if err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - // Generate the initial tar header. - header, err := tar.FileInfoHeader(info, path) - if err != nil { - return err - } - // Rewrite the filename with a relative path to the root dir. - header.Name = strings.Replace(path, dir, "", 1) - if err := tw.WriteHeader(header); err != nil { - return err - } - // If not a dir, write file content as is. - if !info.IsDir() { - data, err := os.Open(path) - if err != nil { - return err - } - if _, err := io.Copy(tw, data); err != nil { - return err - } - } - return nil - }); err != nil { - return nil, err - } - - if err := tw.Close(); err != nil { - return nil, err - } - - // Pull out the tar bundle into a bytes object. 
- return ioutil.ReadAll(&buf) -} - -// publishImg will generate a Pod that runs in the namespace to publish an OCI compliant image into the local registry -// that runs in the cluster. We can't speak to the in-cluster registry from the test so we need to run a Pod to do it -// for us. -func publishImg(ctx context.Context, t *testing.T, c *clients, namespace string, img v1.Image, ref name.Reference) { - t.Helper() - podName := "publish-tekton-bundle" - - tb, err := tarImageInOCIFormat(namespace, img) - if err != nil { - t.Fatalf("Failed to create OCI tar bundle: %s", err) - } - - // Create a configmap to contain the tarball which we will mount in the pod. - cmName := namespace + "uploadimage-cm" - if _, err = c.KubeClient.Kube.CoreV1().ConfigMaps(namespace).Create(ctx, &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: cmName}, - BinaryData: map[string][]byte{ - "image.tar": tb, - }, - }, metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create configmap to upload image: %s", err) - } - - po, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Create(ctx, &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - GenerateName: podName, - }, - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{{ - Name: "cm", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{LocalObjectReference: corev1.LocalObjectReference{Name: cmName}}, - }, - }, { - Name: "scratch", - VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, - }}, - InitContainers: []corev1.Container{{ - Name: "untar", - Image: "busybox", - Command: []string{"/bin/sh", "-c"}, - Args: []string{"mkdir -p /var/image && tar xvf /var/cm/image.tar -C /var/image"}, - VolumeMounts: []corev1.VolumeMount{{ - Name: "cm", - MountPath: "/var/cm", - }, { - Name: "scratch", - MountPath: "/var/image", - }}, - }}, - Containers: []corev1.Container{{ - Name: "skopeo", - Image: "gcr.io/tekton-releases/dogfooding/skopeo:latest", - WorkingDir: "/var", - Command: []string{"/bin/sh", "-c"}, - Args: []string{"skopeo copy --dest-tls-verify=false oci:image docker://" + ref.String()}, - VolumeMounts: []corev1.VolumeMount{{ - Name: "scratch", - MountPath: "/var/image", - }}, - }}, - RestartPolicy: corev1.RestartPolicyNever, - }, - }, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Failed to create the skopeo pod: %v", err) - } - if err := WaitForPodState(ctx, c, po.Name, namespace, func(pod *corev1.Pod) (bool, error) { - return pod.Status.Phase == "Succeeded", nil - }, "PodContainersTerminated"); err != nil { - req := c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(po.GetName(), &corev1.PodLogOptions{Container: "skopeo"}) - logs, err := req.DoRaw(ctx) - if err != nil { - t.Fatalf("Error waiting for Pod %q to terminate: %v", podName, err) - } else { - t.Fatalf("Failed to create image. Pod logs are: \n%s", string(logs)) - } - } -} - // TestTektonBundlesSimpleWorkingExample is an integration test which tests a simple, working Tekton bundle using OCI // images. func TestTektonBundlesSimpleWorkingExample(t *testing.T) { ctx := context.Background() - c, namespace := setup(ctx, t, withRegistry) + c, namespace := setup(ctx, t, withRegistry, skipIfTektonOCIBundleDisabled) t.Parallel() @@ -323,7 +191,7 @@ func TestTektonBundlesSimpleWorkingExample(t *testing.T) { // TestTektonBundlesUsingRegularImage is an integration test which passes a non-Tekton bundle as a task reference. 
func TestTektonBundlesUsingRegularImage(t *testing.T) { ctx := context.Background() - c, namespace := setup(ctx, t, withRegistry) + c, namespace := setup(ctx, t, withRegistry, skipIfTektonOCIBundleDisabled) t.Parallel() @@ -408,7 +276,7 @@ func TestTektonBundlesUsingRegularImage(t *testing.T) { // task reference. func TestTektonBundlesUsingImproperFormat(t *testing.T) { ctx := context.Background() - c, namespace := setup(ctx, t, withRegistry) + c, namespace := setup(ctx, t, withRegistry, skipIfTektonOCIBundleDisabled) t.Parallel() @@ -515,3 +383,148 @@ func TestTektonBundlesUsingImproperFormat(t *testing.T) { t.Fatalf("Error waiting for PipelineRun to finish with expected error: %s", err) } } + +func tarImageInOCIFormat(namespace string, img v1.Image) ([]byte, error) { + // Write the image in the OCI layout and then tar it up. + dir, err := ioutil.TempDir(os.TempDir(), namespace) + if err != nil { + return nil, err + } + + p, err := layout.Write(dir, empty.Index) + if err != nil { + return nil, err + } + + if err := p.AppendImage(img); err != nil { + return nil, err + } + + // Now that the layout is correct, package this up as a tarball. + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + if err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + // Generate the initial tar header. + header, err := tar.FileInfoHeader(info, path) + if err != nil { + return err + } + // Rewrite the filename with a relative path to the root dir. + header.Name = strings.Replace(path, dir, "", 1) + if err := tw.WriteHeader(header); err != nil { + return err + } + // If not a dir, write file content as is. + if !info.IsDir() { + data, err := os.Open(path) + if err != nil { + return err + } + if _, err := io.Copy(tw, data); err != nil { + return err + } + } + return nil + }); err != nil { + return nil, err + } + + if err := tw.Close(); err != nil { + return nil, err + } + + // Pull out the tar bundle into a bytes object. + return ioutil.ReadAll(&buf) +} + +// publishImg will generate a Pod that runs in the namespace to publish an OCI compliant image into the local registry +// that runs in the cluster. We can't speak to the in-cluster registry from the test so we need to run a Pod to do it +// for us. +func publishImg(ctx context.Context, t *testing.T, c *clients, namespace string, img v1.Image, ref name.Reference) { + t.Helper() + podName := "publish-tekton-bundle" + + tb, err := tarImageInOCIFormat(namespace, img) + if err != nil { + t.Fatalf("Failed to create OCI tar bundle: %s", err) + } + + // Create a configmap to contain the tarball which we will mount in the pod. 
+ cmName := namespace + "uploadimage-cm" + if _, err = c.KubeClient.Kube.CoreV1().ConfigMaps(namespace).Create(ctx, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: cmName}, + BinaryData: map[string][]byte{ + "image.tar": tb, + }, + }, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create configmap to upload image: %s", err) + } + + po, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Create(ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + GenerateName: podName, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{{ + Name: "cm", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{LocalObjectReference: corev1.LocalObjectReference{Name: cmName}}, + }, + }, { + Name: "scratch", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }}, + InitContainers: []corev1.Container{{ + Name: "untar", + Image: "busybox", + Command: []string{"/bin/sh", "-c"}, + Args: []string{"mkdir -p /var/image && tar xvf /var/cm/image.tar -C /var/image"}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "cm", + MountPath: "/var/cm", + }, { + Name: "scratch", + MountPath: "/var/image", + }}, + }}, + Containers: []corev1.Container{{ + Name: "skopeo", + Image: "gcr.io/tekton-releases/dogfooding/skopeo:latest", + WorkingDir: "/var", + Command: []string{"/bin/sh", "-c"}, + Args: []string{"skopeo copy --dest-tls-verify=false oci:image docker://" + ref.String()}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "scratch", + MountPath: "/var/image", + }}, + }}, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create the skopeo pod: %v", err) + } + if err := WaitForPodState(ctx, c, po.Name, namespace, func(pod *corev1.Pod) (bool, error) { + return pod.Status.Phase == "Succeeded", nil + }, "PodContainersTerminated"); err != nil { + req := c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(po.GetName(), &corev1.PodLogOptions{Container: "skopeo"}) + logs, err := req.DoRaw(ctx) + if err != nil { + t.Fatalf("Error waiting for Pod %q to terminate: %v", podName, err) + } else { + t.Fatalf("Failed to create image. Pod logs are: \n%s", string(logs)) + } + } +} + +func skipIfTektonOCIBundleDisabled(ctx context.Context, t *testing.T, c *clients, namespace string) { + featureFlagsCM, err := c.KubeClient.Kube.CoreV1().ConfigMaps(system.GetNamespace()).Get(ctx, config.GetFeatureFlagsConfigName(), metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get ConfigMap `%s`: %s", config.GetFeatureFlagsConfigName(), err) + } + enableTektonOCIBundle, ok := featureFlagsCM.Data["enable-tekton-oci-bundles"] + if !ok || enableTektonOCIBundle != "true" { + t.Skip("Skip because enable-tekton-oci-bundles != true") + } +}
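Reviewer note (not part of the patch): to tie the pieces of this change together, here is a small Go sketch of how code consults the new flag through the `config` package, following the same `FromContextOrDefaults` pattern used in `pipelinerun_validation.go`, `pipelineref.go`, and `taskref.go` above. The helper name is illustrative only; with no feature-flags config attached to the context, the call falls back to the compiled-in default of `false`.

```go
package main

import (
	"context"
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/config"
)

// bundlesEnabled reports whether Tekton OCI bundle support is switched on for
// the given context, mirroring the gating added in this diff.
func bundlesEnabled(ctx context.Context) bool {
	// FromContextOrDefaults returns the stored config when a config store has
	// been attached to ctx (as the reconcilers and tests above do), otherwise
	// the defaults, where EnableTektonOCIBundles is false.
	cfg := config.FromContextOrDefaults(ctx)
	return cfg.FeatureFlags.EnableTektonOCIBundles
}

func main() {
	// Without a config store on the context, the flag defaults to false.
	fmt.Println(bundlesEnabled(context.Background()))
}
```

In the tests above, the same effect is achieved by feeding a feature-flags ConfigMap with `enable-tekton-oci-bundles: "true"` into a `config.Store` and attaching it to the context, or, for the integration tests, by skipping when the cluster's `feature-flags` ConfigMap does not set the flag.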