From efeb1db708388bf46bf921a0a0944e2355699494 Mon Sep 17 00:00:00 2001
From: Jonas Pettersson
Date: Sat, 4 Apr 2020 19:58:54 +0200
Subject: [PATCH] Add volumeClaimTemplate as a Workspace volume source

An existing PersistentVolumeClaim can currently be used as a Workspace
volume source. There are two ways of using an existing PVC as a volume:

- Reuse an existing PVC
- Create a new PVC before each PipelineRun

There are disadvantages to reusing the same PVC for every PipelineRun:

- You need to clean the PVC at the end of the Pipeline
- All Tasks using the workspace will be scheduled to the node where
  the PV is bound
- Concurrent PipelineRuns may interfere; an artifact or file from one
  PipelineRun may slip into or be used by another PipelineRun, leaving
  very little audit trail

There are also disadvantages to creating a new PVC before each PipelineRun:

- This cannot (easily) be done declaratively
- This is hard to do programmatically, because it is hard to know when
  to delete the PVC. The PipelineRun cannot be set as an OwnerReference
  since the PVC must be created first

This commit adds 'volumeClaimTemplate' as a volume source for workspaces.
This has several advantages:

- The syntax is used in the k8s StatefulSet and other k8s projects, so it
  is familiar in the kubernetes ecosystem
- It is possible to declare, declaratively, that a PVC should be created
  for each PipelineRun, e.g. from a TriggerTemplate.
- The user can choose a storageClass (or omit it to get the cluster
  default), e.g. to get a faster SSD volume, or to get a volume
  compatible with e.g. Windows.
- The user can adapt the size to the job, e.g. use 5Gi for apps that
  contain machine learning models, or 1Gi for microservice apps. The size
  can be changed on demand in a configuration that lives in the user's
  namespace, e.g. in a TriggerTemplate.
- The size affects the storage quota that is set on the namespace, and it
  may affect billing and cost depending on the cluster environment.
- The PipelineRun or TaskRun with the template is created first and is
  used as the OwnerReference on the PVC. That means the PVC will have the
  same lifecycle as the PipelineRun.
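For illustration, a PipelineRun binding a workspace through a volumeClaimTemplate could look like the following sketch. The resource names and the size are invented for this example; only the `workspaces[].volumeClaimTemplate` field itself comes from this patch:

```yaml
apiVersion: tekton.dev/v1alpha1
kind: PipelineRun
metadata:
  name: example-pipelinerun      # hypothetical name
spec:
  pipelineRef:
    name: example-pipeline       # hypothetical Pipeline
  workspaces:
    - name: shared-data          # must match a workspace declared by the Pipeline
      volumeClaimTemplate:
        spec:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 1Gi       # size can be adapted per job
```

A PVC is created from the template for each such PipelineRun and is owned by it, so deleting the PipelineRun deletes the PVC.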
Related to #1986

See also:

- #2174
- #2218
- https://github.com/tektoncd/triggers/issues/476
- https://github.com/tektoncd/triggers/issues/482
- https://github.com/kubeflow/kfp-tekton/issues/51
---
 docs/workspaces.md                            | 12 ++-
 .../pipeline/v1alpha1/pipelinerun_types.go    | 11 +++
 .../v1alpha1/pipelinerun_types_test.go        | 19 ++++
 pkg/apis/pipeline/v1alpha1/taskrun_types.go   | 25 +++++
 .../pipeline/v1alpha1/taskrun_types_test.go   | 19 ++++
 pkg/apis/pipeline/v1beta1/workspace_types.go  |  4 +
 .../pipeline/v1beta1/workspace_validation.go  |  4 +
 .../v1beta1/workspace_validation_test.go      | 20 ++++
 .../pipeline/v1beta1/zz_generated.deepcopy.go |  5 +
 pkg/reconciler/pipelinerun/controller.go      |  2 +
 pkg/reconciler/pipelinerun/pipelinerun.go     | 41 +++++++-
 .../pipelinerun/pipelinerun_test.go           | 81 ++++++++++++++++
 .../resources/pipelinerunresolution.go        | 10 ++
 .../resources/pipelinerunresolution_test.go   | 12 +++
 pkg/reconciler/taskrun/controller.go          |  2 +
 pkg/reconciler/taskrun/taskrun.go             | 41 ++++++++
 pkg/reconciler/taskrun/taskrun_test.go        | 51 ++++++++++
 pkg/reconciler/volumeclaim/pvchandler.go      | 73 ++++++++++++++
 pkg/reconciler/volumeclaim/pvchandler_test.go | 95 +++++++++++++++++++
 test/builder/pipeline.go                      | 15 +++
 test/builder/task.go                          | 11 +++
 21 files changed, 547 insertions(+), 6 deletions(-)
 create mode 100644 pkg/reconciler/volumeclaim/pvchandler.go
 create mode 100644 pkg/reconciler/volumeclaim/pvchandler_test.go

diff --git a/docs/workspaces.md b/docs/workspaces.md
index 1054af5cbaf..a60062d19d1 100644
--- a/docs/workspaces.md
+++ b/docs/workspaces.md
@@ -19,8 +19,8 @@
 `Workspaces` allow `Tasks` to declare parts of the filesystem that need to be provided
 at runtime by `TaskRuns`. A `TaskRun` can make these parts of the filesystem available
-in many ways: using a read-only `ConfigMap` or `Secret`, a `PersistentVolumeClaim`
-shared with other Tasks, or simply an `emptyDir` that is discarded when the `TaskRun`
+in many ways: using a read-only `ConfigMap` or `Secret`, an existing `PersistentVolumeClaim`
+shared with other Tasks, a `PersistentVolumeClaim` created from a provided `volumeClaimTemplate`, or simply an `emptyDir` that is discarded when the `TaskRun`
 completes.
 
 `Workspaces` are similar to `Volumes` except that they allow a `Task` author
@@ -294,9 +294,15 @@ However, they work well for single `TaskRuns` where the data stored in the `empt
 
 #### `persistentVolumeClaim`
 
-The `persistentVolumeClaim` field references a [`persistentVolumeClaim` volume](https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim).
+The `persistentVolumeClaim` field references an existing [`persistentVolumeClaim` volume](https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim).
 `PersistentVolumeClaim` volumes are a good choice for sharing data among `Tasks` within a `Pipeline`.
 
+#### `volumeClaimTemplate`
+
+The `volumeClaimTemplate` is a template for a [`persistentVolumeClaim` volume](https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim), created for each `PipelineRun` or `TaskRun`.
+When the volume is created from a template in a `PipelineRun` or `TaskRun`, it will be deleted when the `PipelineRun` or `TaskRun` is deleted.
+`volumeClaimTemplate` volumes are a good choice for sharing data among `Tasks` within a `Pipeline` when the volume is only used during a `PipelineRun` or `TaskRun`.
+
 #### `configMap`
 
 The `configMap` field references a [`configMap` volume](https://kubernetes.io/docs/concepts/storage/volumes/#configmap).
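The same binding works on a TaskRun. A sketch with an explicit storage class, again with invented names; `storageClassName` and the sizes are standard PersistentVolumeClaimSpec fields, not something this patch adds:

```yaml
apiVersion: tekton.dev/v1alpha1
kind: TaskRun
metadata:
  name: example-taskrun          # hypothetical name
spec:
  taskRef:
    name: example-task           # hypothetical Task
  workspaces:
    - name: output               # must match a workspace declared by the Task
      volumeClaimTemplate:
        spec:
          accessModes:
            - ReadWriteOnce
          storageClassName: fast-ssd  # optional; omit to use the cluster default
          resources:
            requests:
              storage: 5Gi
```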
diff --git a/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go b/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go
index e17d0908f46..90d2614c787 100644
--- a/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go
+++ b/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go
@@ -208,3 +208,14 @@ func (pr *PipelineRun) GetServiceAccountName(pipelineTaskName string) string {
   }
   return serviceAccountName
 }
+
+// HasVolumeClaimTemplate returns true if the PipelineRun contains a volumeClaimTemplate that is
+// used for creating a PersistentVolumeClaim, with an OwnerReference, for each run
+func (pr *PipelineRun) HasVolumeClaimTemplate() bool {
+  for _, ws := range pr.Spec.Workspaces {
+    if ws.VolumeClaimTemplate != nil {
+      return true
+    }
+  }
+  return false
+}
diff --git a/pkg/apis/pipeline/v1alpha1/pipelinerun_types_test.go b/pkg/apis/pipeline/v1alpha1/pipelinerun_types_test.go
index 4613bdf414a..30145d577a3 100644
--- a/pkg/apis/pipeline/v1alpha1/pipelinerun_types_test.go
+++ b/pkg/apis/pipeline/v1alpha1/pipelinerun_types_test.go
@@ -130,6 +130,25 @@ func TestPipelineRunIsCancelled(t *testing.T) {
   }
 }
 
+func TestPipelineRunHasVolumeClaimTemplate(t *testing.T) {
+  pr := &v1alpha1.PipelineRun{
+    Spec: v1alpha1.PipelineRunSpec{
+      Workspaces: []v1alpha1.WorkspaceBinding{{
+        Name: "my-workspace",
+        VolumeClaimTemplate: &corev1.PersistentVolumeClaim{
+          ObjectMeta: metav1.ObjectMeta{
+            Name: "pvc",
+          },
+          Spec: corev1.PersistentVolumeClaimSpec{},
+        },
+      }},
+    },
+  }
+  if !pr.HasVolumeClaimTemplate() {
+    t.Fatal("Expected pipelinerun to have a volumeClaimTemplate workspace")
+  }
+}
+
 func TestPipelineRunKey(t *testing.T) {
   pr := tb.PipelineRun("prunname", "testns")
   expectedKey := fmt.Sprintf("PipelineRun/%p", pr)
diff --git a/pkg/apis/pipeline/v1alpha1/taskrun_types.go b/pkg/apis/pipeline/v1alpha1/taskrun_types.go
index 8ff07627a1f..dc947b32613 100644
--- a/pkg/apis/pipeline/v1alpha1/taskrun_types.go
+++ b/pkg/apis/pipeline/v1alpha1/taskrun_types.go
@@ -23,9 +23,18 @@ import (
   "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
   corev1 "k8s.io/api/core/v1"
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+  "k8s.io/apimachinery/pkg/runtime/schema"
   "knative.dev/pkg/apis"
 )
 
+var (
+  taskRunGroupVersionKind = schema.GroupVersionKind{
+    Group:   SchemeGroupVersion.Group,
+    Version: SchemeGroupVersion.Version,
+    Kind:    pipeline.TaskRunControllerName,
+  }
+)
+
 // TaskRunSpec defines the desired state of TaskRun
 type TaskRunSpec struct {
   // +optional
@@ -165,6 +174,11 @@ func (tr *TaskRun) GetBuildPodRef() corev1.ObjectReference {
   }
 }
 
+// GetOwnerReference gets the task run as owner reference for any related objects
+func (tr *TaskRun) GetOwnerReference() metav1.OwnerReference {
+  return *metav1.NewControllerRef(tr, taskRunGroupVersionKind)
+}
+
 // GetPipelineRunPVCName for taskrun gets pipelinerun
 func (tr *TaskRun) GetPipelineRunPVCName() string {
   if tr == nil {
@@ -228,3 +242,14 @@ func (tr *TaskRun) IsPartOfPipeline() (bool, string, string) {
 
   return false, "", ""
 }
+
+// HasVolumeClaimTemplate returns true if the TaskRun contains a volumeClaimTemplate that is
+// used for creating a PersistentVolumeClaim, with an OwnerReference, for each run
+func (tr *TaskRun) HasVolumeClaimTemplate() bool {
+  for _, ws := range tr.Spec.Workspaces {
+    if ws.VolumeClaimTemplate != nil {
+      return true
+    }
+  }
+  return false
+}
diff --git a/pkg/apis/pipeline/v1alpha1/taskrun_types_test.go b/pkg/apis/pipeline/v1alpha1/taskrun_types_test.go
index 2842554ac59..65d66d0b791 100644
--- a/pkg/apis/pipeline/v1alpha1/taskrun_types_test.go
+++ b/pkg/apis/pipeline/v1alpha1/taskrun_types_test.go
@@ -112,6 +112,25 @@ func TestTaskRunIsCancelled(t *testing.T) {
   }
 }
 
+func TestTaskRunHasVolumeClaimTemplate(t *testing.T) {
+  tr := &v1alpha1.TaskRun{
+    Spec: v1alpha1.TaskRunSpec{
+      Workspaces: []v1alpha1.WorkspaceBinding{{
+        Name: "my-workspace",
+        VolumeClaimTemplate: &corev1.PersistentVolumeClaim{
+          ObjectMeta: metav1.ObjectMeta{
+            Name: "pvc",
+          },
+          Spec: corev1.PersistentVolumeClaimSpec{},
+        },
+      }},
+    },
+  }
+  if !tr.HasVolumeClaimTemplate() {
+    t.Fatal("Expected taskrun to have a volumeClaimTemplate workspace")
+  }
+}
+
 func TestTaskRunKey(t *testing.T) {
   tr := tb.TaskRun("taskrunname", "")
   expectedKey := fmt.Sprintf("TaskRun/%p", tr)
diff --git a/pkg/apis/pipeline/v1beta1/workspace_types.go b/pkg/apis/pipeline/v1beta1/workspace_types.go
index f0eab019486..94d50b30121 100644
--- a/pkg/apis/pipeline/v1beta1/workspace_types.go
+++ b/pkg/apis/pipeline/v1beta1/workspace_types.go
@@ -55,6 +55,10 @@ type WorkspaceBinding struct {
   // for this binding (i.e. the volume will be mounted at this sub directory).
   // +optional
   SubPath string `json:"subPath,omitempty"`
+  // VolumeClaimTemplate is a template for a claim that will be created in the same namespace.
+  // The PipelineRun and TaskRun controllers are responsible for creating a unique claim for each run.
+  // +optional
+  VolumeClaimTemplate *corev1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
   // PersistentVolumeClaimVolumeSource represents a reference to a
   // PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used.
   // +optional
diff --git a/pkg/apis/pipeline/v1beta1/workspace_validation.go b/pkg/apis/pipeline/v1beta1/workspace_validation.go
index 99e0a1b9ef5..624f134216b 100644
--- a/pkg/apis/pipeline/v1beta1/workspace_validation.go
+++ b/pkg/apis/pipeline/v1beta1/workspace_validation.go
@@ -27,6 +27,7 @@ import (
 // WorkspaceBinding may include.
 var allVolumeSourceFields []string = []string{
   "workspace.persistentvolumeclaim",
+  "workspace.volumeclaimtemplate",
   "workspace.emptydir",
   "workspace.configmap",
   "workspace.secret",
@@ -72,6 +73,9 @@ func (b *WorkspaceBinding) Validate(ctx context.Context) *apis.FieldError {
 // has been configured with.
 func (b *WorkspaceBinding) numSources() int {
   n := 0
+  if b.VolumeClaimTemplate != nil {
+    n++
+  }
   if b.PersistentVolumeClaim != nil {
     n++
   }
diff --git a/pkg/apis/pipeline/v1beta1/workspace_validation_test.go b/pkg/apis/pipeline/v1beta1/workspace_validation_test.go
index 9316e0f1e63..d7828903328 100644
--- a/pkg/apis/pipeline/v1beta1/workspace_validation_test.go
+++ b/pkg/apis/pipeline/v1beta1/workspace_validation_test.go
@@ -21,6 +21,8 @@ import (
   "testing"
 
   corev1 "k8s.io/api/core/v1"
+  "k8s.io/apimachinery/pkg/api/resource"
+  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 func TestWorkspaceBindingValidateValid(t *testing.T) {
@@ -35,6 +37,24 @@
         ClaimName: "pool-party",
       },
     },
+  }, {
+    name: "Valid volumeClaimTemplate",
+    binding: &WorkspaceBinding{
+      Name: "beth",
+      VolumeClaimTemplate: &corev1.PersistentVolumeClaim{
+        ObjectMeta: metav1.ObjectMeta{
+          Name: "mypvc",
+        },
+        Spec: corev1.PersistentVolumeClaimSpec{
+          AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
+          Resources: corev1.ResourceRequirements{
+            Requests: corev1.ResourceList{
+              "storage": resource.MustParse("1Gi"),
+            },
+          },
+        },
+      },
+    },
   }, {
     name: "Valid emptyDir",
     binding: &WorkspaceBinding{
diff --git a/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go b/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go
index e629bd49efb..7061d6fb661 100644
--- a/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go
+++ b/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go
@@ -1519,6 +1519,11 @@ func (in *TaskSpec) DeepCopy() *TaskSpec {
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *WorkspaceBinding) DeepCopyInto(out *WorkspaceBinding) {
   *out = *in
+  if in.VolumeClaimTemplate != nil {
+    in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
+    *out = new(v1.PersistentVolumeClaim)
+    (*in).DeepCopyInto(*out)
+  }
   if in.PersistentVolumeClaim != nil {
     in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
     *out = new(v1.PersistentVolumeClaimVolumeSource)
diff --git a/pkg/reconciler/pipelinerun/controller.go b/pkg/reconciler/pipelinerun/controller.go
index 875abbbcab9..dfccf38b327 100644
--- a/pkg/reconciler/pipelinerun/controller.go
+++ b/pkg/reconciler/pipelinerun/controller.go
@@ -31,6 +31,7 @@ import (
   resourceinformer "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/resource/v1alpha1/pipelineresource"
   "github.com/tektoncd/pipeline/pkg/reconciler"
   "github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/config"
+  "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
   "k8s.io/client-go/tools/cache"
   kubeclient "knative.dev/pkg/client/injection/kube/client"
   "knative.dev/pkg/configmap"
@@ -80,6 +81,7 @@ func NewController(images pipeline.Images) func(context.Context, configmap.Watch
     conditionLister: conditionInformer.Lister(),
     timeoutHandler:  timeoutHandler,
     metrics:         metrics,
+    pvcHandler:      volumeclaim.NewPVCHandler(kubeclientset, logger),
   }
 
   impl := controller.NewImpl(c, c.Logger, pipeline.PipelineRunControllerName)
diff --git a/pkg/reconciler/pipelinerun/pipelinerun.go b/pkg/reconciler/pipelinerun/pipelinerun.go
index ed90efee8b9..7a49fed87a5 100644
--- a/pkg/reconciler/pipelinerun/pipelinerun.go
+++ b/pkg/reconciler/pipelinerun/pipelinerun.go
@@ -36,6 +36,7 @@ import (
   "github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag"
   "github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources"
   "github.com/tektoncd/pipeline/pkg/reconciler/taskrun"
"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" @@ -105,6 +106,7 @@ type Reconciler struct { configStore configStore timeoutHandler *reconciler.TimeoutSet metrics *Recorder + pvcHandler volumeclaim.PvcHandler } var ( @@ -436,6 +438,21 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1alpha1.PipelineRun) er return err } + if pipelineState.IsBeforeFirstTaskRun() && pr.HasVolumeClaimTemplate() { + // create workspace PVC from template + if err = c.pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(pr.Spec.Workspaces, pr.GetOwnerReference()[0], pr.Namespace); err != nil { + c.Logger.Errorf("Failed to create PVC for PipelineRun %s: %v", pr.Name, err) + pr.Status.SetCondition(&apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: volumeclaim.ReasonCouldntCreateWorkspacePVC, + Message: fmt.Sprintf("Failed to create PVC for PipelineRun %s Workspaces correctly: %s", + fmt.Sprintf("%s/%s", pr.Namespace, pr.Name), err), + }) + return nil + } + } + candidateTasks, err := dag.GetSchedulable(d, pipelineState.SuccessfulPipelineTaskNames()...) if err != nil { c.Logger.Errorf("Error getting potential next tasks for valid pipelinerun %s: %v", pr.Name, err) @@ -603,9 +620,7 @@ func (c *Reconciler) createTaskRun(rprt *resources.ResolvedPipelineRunTask, pr * for _, ws := range rprt.PipelineTask.Workspaces { taskWorkspaceName, pipelineWorkspaceName := ws.Name, ws.Workspace if b, hasBinding := pipelineRunWorkspaces[pipelineWorkspaceName]; hasBinding { - binding := *b.DeepCopy() - binding.Name = taskWorkspaceName - tr.Spec.Workspaces = append(tr.Spec.Workspaces, binding) + tr.Spec.Workspaces = append(tr.Spec.Workspaces, taskWorkspaceByWorkspaceVolumeSource(b, taskWorkspaceName, pr.GetOwnerReference()[0])) } else { return nil, fmt.Errorf("expected workspace %q to be provided by pipelinerun for pipeline task %q", pipelineWorkspaceName, rprt.PipelineTask.Name) } @@ -616,6 +631,26 @@ func (c *Reconciler) createTaskRun(rprt *resources.ResolvedPipelineRunTask, pr * return c.PipelineClientSet.TektonV1alpha1().TaskRuns(pr.Namespace).Create(tr) } +// taskWorkspaceByWorkspaceVolumeSource is returning the WorkspaceBinding with the TaskRun specified name. +// If the volume source is a volumeClaimTemplate, the template is applied and passed to TaskRun as a persistentVolumeClaim +func taskWorkspaceByWorkspaceVolumeSource(wb v1alpha1.WorkspaceBinding, taskWorkspaceName string, owner metav1.OwnerReference) v1alpha1.WorkspaceBinding { + if wb.VolumeClaimTemplate == nil { + binding := *wb.DeepCopy() + binding.Name = taskWorkspaceName + return binding + } + + // apply template + binding := v1alpha1.WorkspaceBinding{ + SubPath: wb.SubPath, + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: volumeclaim.GetPersistentVolumeClaimName(wb.VolumeClaimTemplate, wb, owner), + }, + } + binding.Name = taskWorkspaceName + return binding +} + func addRetryHistory(tr *v1alpha1.TaskRun) { newStatus := *tr.Status.DeepCopy() newStatus.RetriesStatus = nil diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go index 1b7ed45ec92..736b13c7c0a 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -1778,3 +1778,84 @@ func TestReconcileWithTaskResults(t *testing.T) { t.Errorf("expected to see TaskRun %v created. 
Diff %s", expectedTaskRunName, d) } } + +// TestReconcileWithVolumeClaimTemplateWorkspace tests that given a pipeline with volumeClaimTemplate workspace, +// a PVC is created and that the workspace appears as a PersistentVolumeClaim workspace for TaskRuns. +func TestReconcileWithVolumeClaimTemplateWorkspace(t *testing.T) { + workspaceName := "ws1" + claimName := "myclaim" + pipelineRunName := "test-pipeline-run" + ps := []*v1alpha1.Pipeline{tb.Pipeline("test-pipeline", "foo", tb.PipelineSpec( + tb.PipelineTask("hello-world-1", "hello-world", tb.PipelineTaskWorkspaceBinding("taskWorkspaceName", workspaceName)), + tb.PipelineTask("hello-world-2", "hello-world"), + tb.PipelineWorkspaceDeclaration(workspaceName), + ))} + + prs := []*v1alpha1.PipelineRun{tb.PipelineRun(pipelineRunName, "foo", + tb.PipelineRunSpec("test-pipeline", tb.PipelineRunWorkspaceBindingVolumeClaimTemplate(workspaceName, claimName))), + } + ts := []*v1alpha1.Task{tb.Task("hello-world", "foo")} + + d := test.Data{ + PipelineRuns: prs, + Pipelines: ps, + Tasks: ts, + } + + testAssets, cancel := getPipelineRunController(t, d) + defer cancel() + c := testAssets.Controller + clients := testAssets.Clients + + err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-run") + if err != nil { + t.Errorf("Did not expect to see error when reconciling PipelineRun but saw %s", err) + } + + // Check that the PipelineRun was reconciled correctly + reconciledRun, err := clients.Pipeline.TektonV1alpha1().PipelineRuns("foo").Get("test-pipeline-run", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Somehow had error getting reconciled run out of fake client: %s", err) + } + + // Check that the expected PVC was created + pvcNames := make([]string, 0) + for _, a := range clients.Kube.Actions() { + if ca, ok := a.(ktesting.CreateAction); ok { + obj := ca.GetObject() + if pvc, ok := obj.(*corev1.PersistentVolumeClaim); ok { + pvcNames = append(pvcNames, pvc.Name) + } + } + } + + if len(pvcNames) != 1 { + t.Errorf("expected one PVC created. %d was created", len(pvcNames)) + } + + expectedPVCName := fmt.Sprintf("%s-%s-%s", claimName, workspaceName, pipelineRunName) + if pvcNames[0] != expectedPVCName { + t.Errorf("expected the created PVC to be named %s. It was named %s", expectedPVCName, pvcNames[0]) + } + + taskRuns, err := clients.Pipeline.TektonV1alpha1().TaskRuns("foo").List(metav1.ListOptions{}) + if err != nil { + t.Fatalf("unexpected error when listing TaskRuns: %v", err) + } + + for _, tr := range taskRuns.Items { + for _, ws := range tr.Spec.Workspaces { + if ws.VolumeClaimTemplate != nil { + t.Fatalf("found volumeClaimTemplate workspace. Did not expect to find any taskruns with volumeClaimTemplate workspaces") + } + + if ws.PersistentVolumeClaim == nil { + t.Fatalf("found taskRun workspace that is not PersistentVolumeClaim workspace. 
+      }
+    }
+  }
+
+  if !reconciledRun.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() {
+    t.Errorf("Expected PipelineRun to be running, but condition status is %s", reconciledRun.Status.GetCondition(apis.ConditionSucceeded))
+  }
+}
diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go
index 13a1ea65f1c..37a79cba100 100644
--- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go
+++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go
@@ -152,6 +152,16 @@ func (state PipelineRunState) IsDone() (isDone bool) {
   return
 }
 
+// IsBeforeFirstTaskRun returns true if the PipelineRun has not yet started its first TaskRun
+func (state PipelineRunState) IsBeforeFirstTaskRun() (isBeforeFirstTaskRun bool) {
+  isBeforeFirstTaskRun = true
+  for _, t := range state {
+    if t.TaskRun != nil {
+      return false
+    }
+  }
+  return
+}
+
 // GetNextTasks will return the next ResolvedPipelineRunTasks to execute, which are the ones in the
 // list of candidateTasks which aren't yet indicated in state to be running.
 func (state PipelineRunState) GetNextTasks(candidateTasks map[string]struct{}) []*ResolvedPipelineRunTask {
diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go
index 2b7ade90326..4507a9db493 100644
--- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go
+++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go
@@ -1952,3 +1952,15 @@ func TestValidateWorkspaceBindings(t *testing.T) {
     t.Fatalf("Expected error indicating `foo` workspace was not provided but got no error")
   }
 }
+
+func TestIsBeforeFirstTaskRun_WithNotStartedTask(t *testing.T) {
+  if !noneStartedState.IsBeforeFirstTaskRun() {
+    t.Fatalf("Expected state to be before first taskrun")
+  }
+}
+
+func TestIsBeforeFirstTaskRun_WithStartedTask(t *testing.T) {
+  if oneStartedState.IsBeforeFirstTaskRun() {
+    t.Fatalf("Expected state to be after first taskrun")
+  }
+}
diff --git a/pkg/reconciler/taskrun/controller.go b/pkg/reconciler/taskrun/controller.go
index 061e1c6999e..4642b801947 100644
--- a/pkg/reconciler/taskrun/controller.go
+++ b/pkg/reconciler/taskrun/controller.go
@@ -30,6 +30,7 @@ import (
   "github.com/tektoncd/pipeline/pkg/pod"
   "github.com/tektoncd/pipeline/pkg/reconciler"
   cloudeventclient "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/cloudevent"
+  "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
   "k8s.io/client-go/tools/cache"
   kubeclient "knative.dev/pkg/client/injection/kube/client"
   podinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/pod"
@@ -82,6 +83,7 @@ func NewController(images pipeline.Images) func(context.Context, configmap.Watch
     cloudEventClient: cloudeventclient.Get(ctx),
     metrics:          metrics,
     entrypointCache:  entrypointCache,
+    pvcHandler:       volumeclaim.NewPVCHandler(kubeclientset, logger),
   }
 
   impl := controller.NewImpl(c, c.Logger, pipeline.TaskRunControllerName)
diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go
index 53aabf49ad4..c399cf46842 100644
--- a/pkg/reconciler/taskrun/taskrun.go
+++ b/pkg/reconciler/taskrun/taskrun.go
@@ -36,6 +36,7 @@ import (
   "github.com/tektoncd/pipeline/pkg/reconciler"
   "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
   "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/cloudevent"
+  "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
   "github.com/tektoncd/pipeline/pkg/termination"
"github.com/tektoncd/pipeline/pkg/workspace" "go.uber.org/zap" @@ -68,6 +69,7 @@ type Reconciler struct { entrypointCache podconvert.EntrypointCache timeoutHandler *reconciler.TimeoutSet metrics *Recorder + pvcHandler volumeclaim.PvcHandler } // Check that our Reconciler implements controller.Reconciler @@ -375,6 +377,23 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error } if pod == nil { + if tr.HasVolumeClaimTemplate() { + if err = c.pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(tr.Spec.Workspaces, tr.GetOwnerReference(), tr.Namespace); err != nil { + c.Logger.Errorf("Failed to create PVC for TaskRun %s: %v", tr.Name, err) + tr.Status.SetCondition(&apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: volumeclaim.ReasonCouldntCreateWorkspacePVC, + Message: fmt.Sprintf("Failed to create PVC for TaskRun %s workspaces correctly: %s", + fmt.Sprintf("%s/%s", tr.Namespace, tr.Name), err), + }) + return nil + } + + taskRunWorkspaces := applyVolumeClaimTemplates(tr.Spec.Workspaces, tr.GetOwnerReference()) + tr.Spec.Workspaces = taskRunWorkspaces + } + pod, err = c.createPod(tr, rtr) if err != nil { c.handlePodCreationError(tr, err) @@ -668,3 +687,25 @@ func updateStoppedSidecarStatus(pod *corev1.Pod, tr *v1alpha1.TaskRun, c *Reconc _, err := c.updateStatus(tr) return err } + +// apply VolumeClaimTemplates and return WorkspaceBindings were templates is translated to PersistentVolumeClaims +func applyVolumeClaimTemplates(workspaceBindings []v1alpha1.WorkspaceBinding, owner metav1.OwnerReference) []v1alpha1.WorkspaceBinding { + taskRunWorkspaceBindings := make([]v1alpha1.WorkspaceBinding, 0) + for _, wb := range workspaceBindings { + if wb.VolumeClaimTemplate == nil { + taskRunWorkspaceBindings = append(taskRunWorkspaceBindings, wb) + continue + } + + // apply template + b := v1alpha1.WorkspaceBinding{ + Name: wb.Name, + SubPath: wb.SubPath, + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: volumeclaim.GetPersistentVolumeClaimName(wb.VolumeClaimTemplate, wb, owner), + }, + } + taskRunWorkspaceBindings = append(taskRunWorkspaceBindings, b) + } + return taskRunWorkspaceBindings +} diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index 5610a4548b7..b2b69c68471 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -2179,3 +2179,54 @@ func TestReconcileTaskResourceResolutionAndValidation(t *testing.T) { }) } } + +// TestReconcileWorkspaceWithVolumeClaimTemplate tests a reconcile of a TaskRun that has +// a Workspace with VolumeClaimTemplate and check that it is translated to a created PersistentVolumeClaim. 
+func TestReconcileWorkspaceWithVolumeClaimTemplate(t *testing.T) {
+  workspaceName := "ws1"
+  claimName := "mypvc"
+  taskWithWorkspace := tb.Task("test-task-with-workspace", "foo",
+    tb.TaskSpec(
+      tb.TaskWorkspace(workspaceName, "a test task workspace", "", true),
+    ))
+  taskRun := tb.TaskRun("test-taskrun-missing-workspace", "foo", tb.TaskRunSpec(
+    tb.TaskRunTaskRef(taskWithWorkspace.Name, tb.TaskRefAPIVersion("a1")),
+    tb.TaskRunWorkspaceVolumeClaimTemplate(workspaceName, "", &corev1.PersistentVolumeClaim{
+      ObjectMeta: metav1.ObjectMeta{
+        Name: claimName,
+      },
+      Spec: corev1.PersistentVolumeClaimSpec{},
+    }),
+  ))
+  d := test.Data{
+    Tasks:             []*v1alpha1.Task{taskWithWorkspace},
+    TaskRuns:          []*v1alpha1.TaskRun{taskRun},
+    ClusterTasks:      nil,
+    PipelineResources: nil,
+  }
+  names.TestingSeed()
+  testAssets, cancel := getTaskRunController(t, d)
+  defer cancel()
+  clients := testAssets.Clients
+
+  if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
+    t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
+  }
+
+  ttt, err := clients.Pipeline.TektonV1alpha1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{})
+  if err != nil {
+    t.Fatalf("expected TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err)
+  }
+
+  for _, w := range ttt.Spec.Workspaces {
+    if w.PersistentVolumeClaim != nil {
+      t.Fatalf("did not expect the volumeClaimTemplate workspace to be replaced by a persistentVolumeClaim in the stored TaskRun spec")
+    }
+  }
+
+  expectedPVCName := fmt.Sprintf("%s-%s-%s", claimName, workspaceName, taskRun.Name)
+  _, err = clients.Kube.CoreV1().PersistentVolumeClaims(taskRun.Namespace).Get(expectedPVCName, metav1.GetOptions{})
+  if err != nil {
+    t.Fatalf("expected PVC %s to exist but instead got error when getting it: %v", expectedPVCName, err)
+  }
+}
diff --git a/pkg/reconciler/volumeclaim/pvchandler.go b/pkg/reconciler/volumeclaim/pvchandler.go
new file mode 100644
index 00000000000..928968e22a0
--- /dev/null
+++ b/pkg/reconciler/volumeclaim/pvchandler.go
@@ -0,0 +1,73 @@
+package volumeclaim
+
+import (
+  "fmt"
+
+  "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
+  "go.uber.org/zap"
+  corev1 "k8s.io/api/core/v1"
+  apierrors "k8s.io/apimachinery/pkg/api/errors"
+  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+  errorutils "k8s.io/apimachinery/pkg/util/errors"
+  clientset "k8s.io/client-go/kubernetes"
+)
+
+const (
+  // ReasonCouldntCreateWorkspacePVC indicates that a Pipeline expects a workspace from a
+  // volumeClaimTemplate but couldn't create a claim.
+  ReasonCouldntCreateWorkspacePVC = "CouldntCreateWorkspacePVC"
+)
+
+// PvcHandler is responsible for creating PersistentVolumeClaims for workspaces with a volumeClaimTemplate.
+type PvcHandler interface {
+  CreatePersistentVolumeClaimsForWorkspaces(wb []v1alpha1.WorkspaceBinding, ownerReference metav1.OwnerReference, namespace string) error
+}
+
+type defaultPVCHandler struct {
+  clientset clientset.Interface
+  logger    *zap.SugaredLogger
+}
+
+// NewPVCHandler returns a new defaultPVCHandler.
+func NewPVCHandler(clientset clientset.Interface, logger *zap.SugaredLogger) PvcHandler {
+  return &defaultPVCHandler{clientset, logger}
+}
+
+// CreatePersistentVolumeClaimsForWorkspaces creates a PersistentVolumeClaim for each workspace
+// that has a volumeClaimTemplate, unless the claim already exists.
+func (c *defaultPVCHandler) CreatePersistentVolumeClaimsForWorkspaces(wb []v1alpha1.WorkspaceBinding, ownerReference metav1.OwnerReference, namespace string) error {
+  var errs []error
+  for _, claim := range getPersistentVolumeClaims(wb, ownerReference, namespace) {
+    _, err := c.clientset.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
+    switch {
+    case apierrors.IsNotFound(err):
+      _, err := c.clientset.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
+      if err != nil {
+        errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err))
+      }
+      if err == nil {
+        c.logger.Infof("Created PersistentVolumeClaim %s in namespace %s", claim.Name, claim.Namespace)
+      }
+    case err != nil:
+      errs = append(errs, fmt.Errorf("failed to retrieve PVC %s: %s", claim.Name, err))
+    }
+  }
+  return errorutils.NewAggregate(errs)
+}
+
+func getPersistentVolumeClaims(workspaceBindings []v1alpha1.WorkspaceBinding, ownerReference metav1.OwnerReference, namespace string) map[string]*corev1.PersistentVolumeClaim {
+  claims := make(map[string]*corev1.PersistentVolumeClaim)
+  for _, workspaceBinding := range workspaceBindings {
+    if workspaceBinding.VolumeClaimTemplate == nil {
+      continue
+    }
+
+    claim := workspaceBinding.VolumeClaimTemplate.DeepCopy()
+    claim.Name = GetPersistentVolumeClaimName(workspaceBinding.VolumeClaimTemplate, workspaceBinding, ownerReference)
+    claim.Namespace = namespace
+    claim.OwnerReferences = []metav1.OwnerReference{ownerReference}
+    claims[workspaceBinding.Name] = claim
+  }
+  return claims
+}
+
+// GetPersistentVolumeClaimName gets the name of the PersistentVolumeClaim for a workspace and its owning
+// PipelineRun or TaskRun: <claim-template-name>-<workspace-name>-<owner-name>.
+func GetPersistentVolumeClaimName(claim *corev1.PersistentVolumeClaim, wb v1alpha1.WorkspaceBinding, owner metav1.OwnerReference) string {
+  return fmt.Sprintf("%s-%s-%s", claim.Name, wb.Name, owner.Name)
+}
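The claims created by this handler look roughly like the following sketch (names are invented; the spec is copied from the template, and the ownerReference shown is what metav1.NewControllerRef on a TaskRun would produce):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myclaim-ws1-my-taskrun   # <template-name>-<workspace-name>-<owner-name>
  namespace: ns
  ownerReferences:
    - apiVersion: tekton.dev/v1alpha1
      kind: TaskRun
      name: my-taskrun           # deleting the TaskRun garbage-collects the PVC
      controller: true
      blockOwnerDeletion: true
spec: {}                         # copied from the volumeClaimTemplate
```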
diff --git a/pkg/reconciler/volumeclaim/pvchandler_test.go b/pkg/reconciler/volumeclaim/pvchandler_test.go
new file mode 100644
index 00000000000..d5841e63e36
--- /dev/null
+++ b/pkg/reconciler/volumeclaim/pvchandler_test.go
@@ -0,0 +1,95 @@
+package volumeclaim
+
+import (
+  "fmt"
+  "testing"
+
+  "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
+  "go.uber.org/zap"
+  corev1 "k8s.io/api/core/v1"
+  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+  fakek8s "k8s.io/client-go/kubernetes/fake"
+)
+
+const actionCreate = "create"
+
+// check that defaultPVCHandler implements PvcHandler
+var _ PvcHandler = (*defaultPVCHandler)(nil)
+
+// TestCreatePersistentVolumeClaimsForWorkspaces tests that, given a TaskRun with a volumeClaimTemplate workspace,
+// a PVC is created with the expected name and the expected OwnerReference.
+func TestCreatePersistentVolumeClaimsForWorkspaces(t *testing.T) {
+
+  // given
+
+  // 2 workspaces with volumeClaimTemplate
+  claimName1 := "pvc1"
+  ws1 := "myws1"
+  ownerName := "taskrun1"
+  workspaces := []v1alpha1.WorkspaceBinding{{
+    Name: ws1,
+    VolumeClaimTemplate: &corev1.PersistentVolumeClaim{
+      ObjectMeta: metav1.ObjectMeta{
+        Name: claimName1,
+      },
+      Spec: corev1.PersistentVolumeClaimSpec{},
+    },
+  }, {
+    Name: "bring-my-own-pvc",
+    PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+      ClaimName: "myown",
+    },
+  }, {
+    Name: "myws2",
+    VolumeClaimTemplate: &corev1.PersistentVolumeClaim{
+      ObjectMeta: metav1.ObjectMeta{
+        Name: "pvc2",
+      },
+      Spec: corev1.PersistentVolumeClaimSpec{},
+    },
+  }}
+
+  ownerRef := metav1.OwnerReference{Name: ownerName}
+  namespace := "ns"
+  fakekubeclient := fakek8s.NewSimpleClientset()
+  pvcHandler := defaultPVCHandler{fakekubeclient, zap.NewExample().Sugar()}
+
+  // when
+
+  err := pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(workspaces, ownerRef, namespace)
+  if err != nil {
+    t.Fatalf("unexpected error: %v", err)
+  }
+
+  expectedPVCName := fmt.Sprintf("%s-%s-%s", claimName1, ws1, ownerName)
+  pvc, err := fakekubeclient.CoreV1().PersistentVolumeClaims(namespace).Get(expectedPVCName, metav1.GetOptions{})
+  if err != nil {
+    t.Fatalf("unexpected error: %v", err)
+  }
+
+  createActions := 0
+  for _, action := range fakekubeclient.Fake.Actions() {
+    if actionCreate == action.GetVerb() {
+      createActions++
+    }
+  }
+
+  // then
+
+  expectedNumberOfCreateActions := 2
+  if createActions != expectedNumberOfCreateActions {
+    t.Fatalf("unexpected number of 'create' PVC actions; expected: %d got: %d", expectedNumberOfCreateActions, createActions)
+  }
+
+  if pvc.Name != expectedPVCName {
+    t.Fatalf("unexpected PVC name on created PVC; expected: %s got: %s", expectedPVCName, pvc.Name)
+  }
+
+  expectedNumberOfOwnerRefs := 1
+  if len(pvc.OwnerReferences) != expectedNumberOfOwnerRefs {
+    t.Fatalf("unexpected number of ownerReferences on created PVC; expected: %d got %d", expectedNumberOfOwnerRefs, len(pvc.OwnerReferences))
+  }
+
+  if pvc.OwnerReferences[0].Name != ownerName {
+    t.Fatalf("unexpected name in ownerReference on created PVC; expected: %s got %s", ownerName, pvc.OwnerReferences[0].Name)
+  }
+}
diff --git a/test/builder/pipeline.go b/test/builder/pipeline.go
index 52aee709ed7..0975779843b 100644
--- a/test/builder/pipeline.go
+++ b/test/builder/pipeline.go
@@ -577,3 +577,18 @@ func PipelineRunWorkspaceBindingEmptyDir(name string) PipelineRunSpecOp {
     })
   }
 }
+
+// PipelineRunWorkspaceBindingVolumeClaimTemplate adds a volumeClaimTemplate workspace binding to the workspaces of a pipelineRun spec.
+func PipelineRunWorkspaceBindingVolumeClaimTemplate(name string, claimName string) PipelineRunSpecOp {
+  return func(spec *v1alpha1.PipelineRunSpec) {
+    spec.Workspaces = append(spec.Workspaces, v1alpha1.WorkspaceBinding{
+      Name: name,
+      VolumeClaimTemplate: &corev1.PersistentVolumeClaim{
+        ObjectMeta: metav1.ObjectMeta{
+          Name: claimName,
+        },
+        Spec: corev1.PersistentVolumeClaimSpec{},
+      },
+    })
+  }
+}
diff --git a/test/builder/task.go b/test/builder/task.go
index 5ba88b1e55c..44a047a7c97 100644
--- a/test/builder/task.go
+++ b/test/builder/task.go
@@ -893,6 +893,17 @@ func TaskRunWorkspacePVC(name, subPath, claimName string) TaskRunSpecOp {
   }
 }
 
+// TaskRunWorkspaceVolumeClaimTemplate adds a workspace binding with a volumeClaimTemplate volume source.
+func TaskRunWorkspaceVolumeClaimTemplate(name, subPath string, volumeClaimTemplate *corev1.PersistentVolumeClaim) TaskRunSpecOp {
+  return func(spec *v1alpha1.TaskRunSpec) {
+    spec.Workspaces = append(spec.Workspaces, v1alpha1.WorkspaceBinding{
+      Name:                name,
+      SubPath:             subPath,
+      VolumeClaimTemplate: volumeClaimTemplate,
+    })
+  }
+}
+
 // ResolvedTaskResources creates a ResolvedTaskResources with default values.
 // Any number of ResolvedTaskResources modifier can be passed to transform it.
 func ResolvedTaskResources(ops ...ResolvedTaskResourcesOp) *resources.ResolvedTaskResources {
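Since the commit message motivates volumeClaimTemplate with declarative creation from triggers, a TriggerTemplate embedding such a PipelineRun might look like this sketch (all names are invented; see the linked tektoncd/triggers issues for the actual triggers-side work):

```yaml
apiVersion: triggers.tekton.dev/v1alpha1
kind: TriggerTemplate
metadata:
  name: example-trigger-template   # hypothetical
spec:
  resourcetemplates:
    - apiVersion: tekton.dev/v1alpha1
      kind: PipelineRun
      metadata:
        generateName: example-run-  # a fresh PipelineRun, and thus a fresh PVC, per trigger
      spec:
        pipelineRef:
          name: example-pipeline
        workspaces:
          - name: shared-data
            volumeClaimTemplate:
              spec:
                accessModes:
                  - ReadWriteOnce
                resources:
                  requests:
                    storage: 1Gi
```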