From 4ca967c3222ff87dc9f21d94a1f4d06f72785ae8 Mon Sep 17 00:00:00 2001
From: Jason Hall
Date: Fri, 29 Nov 2019 18:29:12 -0500
Subject: [PATCH] Split out step resource request management

Before this change, all steps' resource requests (*not* limits) were
zeroed out unless that step's value was the maximum for that resource
across all steps. If step 2 requested the maximum memory of all steps,
but not the max CPU, its memory request would persist but its CPU
request would be zeroed out.

After this change, the max request for each resource type (CPU, memory,
ephemeral storage) is calculated, then applied to the *last* step's
container. The effect is the same, unless Pods' resource requests are
updated to account for exited containers (as far as I know they're
not), and this is simpler to express and explain.

And since all the code for this is in its own method and file, it's
easier to test in isolation.
---
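NOTE (reviewer aid, not part of the commit message or the diff below): the
sketch that follows illustrates the behavior described above. It is written
as if it sat next to resource_request_test.go in package pod, since
resolveResourceRequests is unexported; the test name and the request values
are made up for illustration.

package pod

import (
    "testing"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

// Hypothetical test, illustration only: with two steps requesting
// cpu=1/memory=2Gi and cpu=4/memory=1Gi, every container but the last ends
// up with all-zero requests, and the last carries the per-resource maximums
// (cpu=4, memory=2Gi).
func TestResolveResourceRequestsSketch(t *testing.T) {
    in := []corev1.Container{{
        Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{
            corev1.ResourceCPU:    resource.MustParse("1"),
            corev1.ResourceMemory: resource.MustParse("2Gi"),
        }},
    }, {
        Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{
            corev1.ResourceCPU:    resource.MustParse("4"),
            corev1.ResourceMemory: resource.MustParse("1Gi"),
        }},
    }}

    out := resolveResourceRequests(in)

    // The first container's requests are all zeroed out...
    for name, q := range out[0].Resources.Requests {
        if !q.IsZero() {
            t.Errorf("container 0 still requests %v of %v", q.String(), name)
        }
    }
    // ...and the last container carries the per-resource maximums.
    last := out[len(out)-1].Resources.Requests
    if last.Cpu().String() != "4" || last.Memory().String() != "2Gi" {
        t.Errorf("unexpected requests on last container: %v", last)
    }
}

The real coverage for this behavior is in pkg/pod/resource_request_test.go
below.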
 pkg/pod/pod.go                   | 51 +-----------------
 pkg/pod/pod_test.go              | 93 +++++---------------------------
 pkg/pod/resource_request.go      | 36 +++++++++++++
 pkg/pod/resource_request_test.go | 88 ++++++++++++++++++++++++++++++
 4 files changed, 138 insertions(+), 130 deletions(-)
 create mode 100644 pkg/pod/resource_request.go
 create mode 100644 pkg/pod/resource_request_test.go

diff --git a/pkg/pod/pod.go b/pkg/pod/pod.go
index 75b84259111..960efc220ac 100644
--- a/pkg/pod/pod.go
+++ b/pkg/pod/pod.go
@@ -28,7 +28,6 @@ import (
     "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
     "github.com/tektoncd/pipeline/pkg/names"
     corev1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/client-go/kubernetes"
@@ -77,8 +76,6 @@ var (
         VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
     }}
 
-    zeroQty = resource.MustParse("0")
-
     // Random byte reader used for pod name generation.
     // var for testing.
     randReader = rand.Reader
@@ -136,12 +133,8 @@ func MakePod(images pipeline.Images, taskRun *v1alpha1.TaskRun, taskSpec v1alpha
     initContainers = append(initContainers, entrypointInit)
     volumes = append(volumes, toolsVolume, downwardVolume)
 
-    // Zero out non-max resource requests.
-    // TODO(#1605): Split this out so it's more easily testable.
-    maxIndicesByResource := findMaxResourceRequest(taskSpec.Steps, corev1.ResourceCPU, corev1.ResourceMemory, corev1.ResourceEphemeralStorage)
-    for i := range stepContainers {
-        zeroNonMaxResourceRequests(&stepContainers[i], i, maxIndicesByResource)
-    }
+    // Zero out non-max resource requests, move max resource requests to the last step.
+    stepContainers = resolveResourceRequests(stepContainers)
 
     // Add implicit env vars.
     // They're prepended to the list, so that if the user specified any
@@ -255,43 +248,3 @@ func makeLabels(s *v1alpha1.TaskRun) map[string]string {
     labels[taskRunLabelKey] = s.Name
     return labels
 }
-
-// zeroNonMaxResourceRequests zeroes out the container's cpu, memory, or
-// ephemeral storage resource requests if the container does not have the
-// largest request out of all containers in the pod. This is done because Tekton
-// overwrites each container's entrypoint to make containers effectively execute
-// one at a time, so we want pods to only request the maximum resources needed
-// at any single point in time. If no container has an explicit resource
-// request, all requests are set to 0.
-func zeroNonMaxResourceRequests(c *corev1.Container, stepIndex int, maxIndicesByResource map[corev1.ResourceName]int) {
-    if c.Resources.Requests == nil {
-        c.Resources.Requests = corev1.ResourceList{}
-    }
-    for name, maxIdx := range maxIndicesByResource {
-        if maxIdx != stepIndex {
-            c.Resources.Requests[name] = zeroQty
-        }
-    }
-}
-
-// findMaxResourceRequest returns the index of the container with the maximum
-// request for the given resource from among the given set of containers.
-func findMaxResourceRequest(steps []v1alpha1.Step, resourceNames ...corev1.ResourceName) map[corev1.ResourceName]int {
-    maxIdxs := make(map[corev1.ResourceName]int, len(resourceNames))
-    maxReqs := make(map[corev1.ResourceName]resource.Quantity, len(resourceNames))
-    for _, name := range resourceNames {
-        maxIdxs[name] = -1
-        maxReqs[name] = zeroQty
-    }
-    for i, s := range steps {
-        for _, name := range resourceNames {
-            maxReq := maxReqs[name]
-            req, exists := s.Container.Resources.Requests[name]
-            if exists && req.Cmp(maxReq) > 0 {
-                maxIdxs[name] = i
-                maxReqs[name] = req
-            }
-        }
-    }
-    return maxIdxs
-}
diff --git a/pkg/pod/pod_test.go b/pkg/pod/pod_test.go
index 2b46a10aff7..6921c7913d8 100644
--- a/pkg/pod/pod_test.go
+++ b/pkg/pod/pod_test.go
@@ -39,9 +39,6 @@ var (
         CredsImage: "override-with-creds:latest",
         ShellImage: "busybox",
     }
-    resourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool {
-        return x.Cmp(y) == 0
-    })
 )
 
 func TestMakePod(t *testing.T) {
@@ -103,13 +100,7 @@ func TestMakePod(t *testing.T) {
                 Env: implicitEnvVars,
                 VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
                 WorkingDir: workspaceDir,
-                Resources: corev1.ResourceRequirements{
-                    Requests: corev1.ResourceList{
-                        corev1.ResourceCPU: resource.MustParse("0"),
-                        corev1.ResourceMemory: resource.MustParse("0"),
-                        corev1.ResourceEphemeralStorage: resource.MustParse("0"),
-                    },
-                },
+                Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
             }},
             Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
         },
@@ -160,13 +151,7 @@ func TestMakePod(t *testing.T) {
                 Env: implicitEnvVars,
                 VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
                 WorkingDir: workspaceDir,
-                Resources: corev1.ResourceRequirements{
-                    Requests: corev1.ResourceList{
-                        corev1.ResourceCPU: resource.MustParse("0"),
-                        corev1.ResourceMemory: resource.MustParse("0"),
-                        corev1.ResourceEphemeralStorage: resource.MustParse("0"),
-                    },
-                },
+                Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
             }},
             Volumes: append(implicitVolumes, secretsVolume, toolsVolume, downwardVolume),
         },
@@ -209,13 +194,7 @@ func TestMakePod(t *testing.T) {
                 Env: implicitEnvVars,
                 VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
                 WorkingDir: workspaceDir,
-                Resources: corev1.ResourceRequirements{
-                    Requests: corev1.ResourceList{
-                        corev1.ResourceCPU: resource.MustParse("0"),
-                        corev1.ResourceMemory: resource.MustParse("0"),
-                        corev1.ResourceEphemeralStorage: resource.MustParse("0"),
-                    },
-                },
+                Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
             }},
             Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
             SecurityContext: &corev1.PodSecurityContext{
@@ -254,13 +233,7 @@ func TestMakePod(t *testing.T) {
                 Env: implicitEnvVars,
                 VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
                 WorkingDir: workspaceDir,
-                Resources: corev1.ResourceRequirements{
-                    Requests: corev1.ResourceList{
-                        corev1.ResourceCPU: resource.MustParse("0"),
-                        corev1.ResourceMemory: resource.MustParse("0"),
-                        corev1.ResourceEphemeralStorage: resource.MustParse("0"),
-                    },
-                },
+                Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
             }},
             Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
         },
@@ -293,13 +266,7 @@ func TestMakePod(t *testing.T) {
                 Env: implicitEnvVars,
                 VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
                 WorkingDir: workspaceDir,
-                Resources: corev1.ResourceRequirements{
-                    Requests: corev1.ResourceList{
-                        corev1.ResourceCPU: resource.MustParse("0"),
-                        corev1.ResourceMemory: resource.MustParse("0"),
-                        corev1.ResourceEphemeralStorage: resource.MustParse("0"),
-                    },
-                },
+                Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
             }},
             Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
         },
@@ -342,13 +309,7 @@ func TestMakePod(t *testing.T) {
                 Env: implicitEnvVars,
                 VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
                 WorkingDir: filepath.Join(workspaceDir, "test"),
-                Resources: corev1.ResourceRequirements{
-                    Requests: corev1.ResourceList{
-                        corev1.ResourceCPU: resource.MustParse("0"),
-                        corev1.ResourceMemory: resource.MustParse("0"),
-                        corev1.ResourceEphemeralStorage: resource.MustParse("0"),
-                    },
-                },
+                Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
             }},
             Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
         },
@@ -386,13 +347,7 @@ func TestMakePod(t *testing.T) {
                 Env: implicitEnvVars,
                 VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
                 WorkingDir: workspaceDir,
-                Resources: corev1.ResourceRequirements{
-                    Requests: corev1.ResourceList{
-                        corev1.ResourceCPU: resource.MustParse("0"),
-                        corev1.ResourceMemory: resource.MustParse("0"),
-                        corev1.ResourceEphemeralStorage: resource.MustParse("0"),
-                    },
-                },
+                Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
             }, {
                 Name: "sidecar-sc-name",
                 Image: "sidecar-image",
@@ -445,13 +400,7 @@ func TestMakePod(t *testing.T) {
                 Env: implicitEnvVars,
                 VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
                 WorkingDir: workspaceDir,
-                Resources: corev1.ResourceRequirements{
-                    Requests: corev1.ResourceList{
-                        corev1.ResourceCPU: resource.MustParse("8"),
-                        corev1.ResourceMemory: resource.MustParse("0"),
-                        corev1.ResourceEphemeralStorage: resource.MustParse("0"),
-                    },
-                },
+                Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
             }, {
                 Name: "step-unnamed-1",
                 Image: "image",
@@ -470,7 +419,7 @@ func TestMakePod(t *testing.T) {
                 WorkingDir: workspaceDir,
                 Resources: corev1.ResourceRequirements{
                     Requests: corev1.ResourceList{
-                        corev1.ResourceCPU: resource.MustParse("0"),
+                        corev1.ResourceCPU: resource.MustParse("8"),
                         corev1.ResourceMemory: resource.MustParse("100Gi"),
                         corev1.ResourceEphemeralStorage: resource.MustParse("0"),
                     },
@@ -550,13 +499,7 @@ script-heredoc-randomly-generated-6nl7g
                 Env: append(implicitEnvVars, corev1.EnvVar{Name: "FOO", Value: "bar"}),
                 VolumeMounts: append([]corev1.VolumeMount{scriptsVolumeMount, toolsMount, downwardMount}, implicitVolumeMounts...),
                 WorkingDir: workspaceDir,
-                Resources: corev1.ResourceRequirements{
-                    Requests: corev1.ResourceList{
-                        corev1.ResourceCPU: resource.MustParse("0"),
-                        corev1.ResourceMemory: resource.MustParse("0"),
-                        corev1.ResourceEphemeralStorage: resource.MustParse("0"),
-                    },
-                },
+                Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
             }, {
                 Name: "step-two",
                 Image: "image",
@@ -573,13 +516,7 @@ script-heredoc-randomly-generated-6nl7g
                 Env: append(implicitEnvVars, corev1.EnvVar{Name: "FOO", Value: "bar"}),
                 VolumeMounts: append([]corev1.VolumeMount{{Name: "i-have-a-volume-mount"}, scriptsVolumeMount, toolsMount}, implicitVolumeMounts...),
                 WorkingDir: workspaceDir,
-                Resources: corev1.ResourceRequirements{
-                    Requests: corev1.ResourceList{
-                        corev1.ResourceCPU: resource.MustParse("0"),
-                        corev1.ResourceMemory: resource.MustParse("0"),
-                        corev1.ResourceEphemeralStorage: resource.MustParse("0"),
-                    },
-                },
+                Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
             }, {
                 Name: "step-regular-step",
                 Image: "image",
@@ -599,13 +536,7 @@ script-heredoc-randomly-generated-6nl7g
                 Env: append(implicitEnvVars, corev1.EnvVar{Name: "FOO", Value: "bar"}),
                 VolumeMounts: append([]corev1.VolumeMount{toolsMount}, implicitVolumeMounts...),
                 WorkingDir: workspaceDir,
-                Resources: corev1.ResourceRequirements{
-                    Requests: corev1.ResourceList{
-                        corev1.ResourceCPU: resource.MustParse("0"),
-                        corev1.ResourceMemory: resource.MustParse("0"),
-                        corev1.ResourceEphemeralStorage: resource.MustParse("0"),
-                    },
-                },
+                Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
             }},
             Volumes: append(implicitVolumes, scriptsVolume, toolsVolume, downwardVolume),
         },
diff --git a/pkg/pod/resource_request.go b/pkg/pod/resource_request.go
new file mode 100644
index 00000000000..2fd4abdb2e8
--- /dev/null
+++ b/pkg/pod/resource_request.go
@@ -0,0 +1,36 @@
+package pod
+
+import (
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+)
+
+var zeroQty = resource.MustParse("0")
+
+func allZeroQty() corev1.ResourceList {
+    return corev1.ResourceList{
+        corev1.ResourceCPU: zeroQty,
+        corev1.ResourceMemory: zeroQty,
+        corev1.ResourceEphemeralStorage: zeroQty,
+    }
+}
+
+func resolveResourceRequests(containers []corev1.Container) []corev1.Container {
+    max := allZeroQty()
+    for _, c := range containers {
+        for k, v := range c.Resources.Requests {
+            if v.Cmp(max[k]) > 0 {
+                max[k] = v
+            }
+        }
+    }
+
+    // Set resource requests for all steps but the last container to
+    // zero.
+    for i := range containers[:len(containers)-1] {
+        containers[i].Resources.Requests = allZeroQty()
+    }
+    // Set the last container's request to the max of all resources.
+    containers[len(containers)-1].Resources.Requests = max
+    return containers
+}
diff --git a/pkg/pod/resource_request_test.go b/pkg/pod/resource_request_test.go
new file mode 100644
index 00000000000..d838667a628
--- /dev/null
+++ b/pkg/pod/resource_request_test.go
@@ -0,0 +1,88 @@
+package pod
+
+import (
+    "testing"
+
+    "github.com/google/go-cmp/cmp"
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+)
+
+var resourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool {
+    return x.Cmp(y) == 0
+})
+
+func TestResolveResourceRequests(t *testing.T) {
+    for _, c := range []struct {
+        desc string
+        in, want []corev1.Container
+    }{{
+        desc: "three steps, no requests",
+        in: []corev1.Container{{}, {}, {}},
+        want: []corev1.Container{{
+            Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
+        }, {
+            Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
+        }, {
+            Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
+        }},
+    }, {
+        desc: "requests are moved, limits aren't changed",
+        in: []corev1.Container{{
+            Resources: corev1.ResourceRequirements{
+                Requests: corev1.ResourceList{
+                    corev1.ResourceCPU: resource.MustParse("10"),
+                },
+            },
+        }, {
+            Resources: corev1.ResourceRequirements{
+                Requests: corev1.ResourceList{
+                    corev1.ResourceMemory: resource.MustParse("10Gi"),
+                },
+                Limits: corev1.ResourceList{
+                    corev1.ResourceMemory: resource.MustParse("11Gi"),
+                },
+            },
+        }, {
+            Resources: corev1.ResourceRequirements{
+                Requests: corev1.ResourceList{
+                    corev1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
+                },
+                Limits: corev1.ResourceList{
+                    corev1.ResourceMemory: resource.MustParse("100Gi"),
+                },
+            },
+        }},
+        want: []corev1.Container{{
+            // All zeroed out.
+            Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
+        }, {
+            // Requests zeroed out, limits remain.
+            Resources: corev1.ResourceRequirements{
+                Requests: allZeroQty(),
+                Limits: corev1.ResourceList{
+                    corev1.ResourceMemory: resource.MustParse("11Gi"),
+                },
+            },
+        }, {
+            // Requests to the max, limits remain.
+            Resources: corev1.ResourceRequirements{
+                Requests: corev1.ResourceList{
+                    corev1.ResourceCPU: resource.MustParse("10"),
+                    corev1.ResourceMemory: resource.MustParse("10Gi"),
+                    corev1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
+                },
+                Limits: corev1.ResourceList{
+                    corev1.ResourceMemory: resource.MustParse("100Gi"),
+                },
+            },
+        }},
+    }} {
+        t.Run(c.desc, func(t *testing.T) {
+            got := resolveResourceRequests(c.in)
+            if d := cmp.Diff(c.want, got, resourceQuantityCmp); d != "" {
+                t.Errorf("Diff(-want, +got): %s", d)
+            }
+        })
+    }
+}
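NOTE (reviewer aid, not part of the patch): the claim above that "the effect
is the same" rests on how Kubernetes sizes a Pod: roughly speaking, the
scheduler sums the requests of the Pod's (non-init) containers. Zeroing every
step but the last and parking the per-resource maximums on the last step
therefore keeps that sum equal to the maximums, just as the old
zeroNonMaxResourceRequests approach did. A minimal sketch of that bookkeeping,
using a hypothetical sumRequests helper written as if it lived in package pod:

package pod

import corev1 "k8s.io/api/core/v1"

// sumRequests is a hypothetical helper, for illustration only: it adds up the
// resource requests of the given containers the same way the scheduler does
// when sizing a Pod. After resolveResourceRequests, this total equals the
// per-resource maximum across the original steps, because every other
// container's requests are zero.
func sumRequests(containers []corev1.Container) corev1.ResourceList {
    total := corev1.ResourceList{}
    for _, c := range containers {
        for name, q := range c.Resources.Requests {
            sum := total[name]
            sum.Add(q)
            total[name] = sum
        }
    }
    return total
}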