Skip to content

Commit

Permalink
Split out step resource request management
Browse files Browse the repository at this point in the history
Before this change, all steps' resource requests (*not* limits) were
zeroed out unless that value for that metric represented the maximum
across all steps. If step 2 requested the maximum memory of all steps,
but not the max CPU, its memory request would persist but its CPU
request would be zeroed out.

After this change, the max request for each resource type (CPU, memory,
ephemeral storage) is calculated, then applied to the *last* step's
container. The effect is the same, unless Pods' resource requests are
updated to account for exited containers (as far as I know, they're not), and this
is simpler to express and explain. And since all the code for this is in
its own method and file, it's easier to test in isolation.
  • Loading branch information
imjasonh committed Dec 2, 2019
1 parent 8909b63 commit 4ca967c
Show file tree
Hide file tree
Showing 4 changed files with 138 additions and 130 deletions.
51 changes: 2 additions & 49 deletions pkg/pod/pod.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@ import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/names"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
Expand Down Expand Up @@ -77,8 +76,6 @@ var (
VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
}}

zeroQty = resource.MustParse("0")

// Random byte reader used for pod name generation.
// var for testing.
randReader = rand.Reader
Expand Down Expand Up @@ -136,12 +133,8 @@ func MakePod(images pipeline.Images, taskRun *v1alpha1.TaskRun, taskSpec v1alpha
initContainers = append(initContainers, entrypointInit)
volumes = append(volumes, toolsVolume, downwardVolume)

// Zero out non-max resource requests.
// TODO(#1605): Split this out so it's more easily testable.
maxIndicesByResource := findMaxResourceRequest(taskSpec.Steps, corev1.ResourceCPU, corev1.ResourceMemory, corev1.ResourceEphemeralStorage)
for i := range stepContainers {
zeroNonMaxResourceRequests(&stepContainers[i], i, maxIndicesByResource)
}
// Zero out non-max resource requests, move max resource requests to the last step.
stepContainers = resolveResourceRequests(stepContainers)

// Add implicit env vars.
// They're prepended to the list, so that if the user specified any
Expand Down Expand Up @@ -255,43 +248,3 @@ func makeLabels(s *v1alpha1.TaskRun) map[string]string {
labels[taskRunLabelKey] = s.Name
return labels
}

// zeroNonMaxResourceRequests resets the container's resource request to zero
// for every resource (cpu, memory, ephemeral storage) whose pod-wide maximum
// is held by a different step. Tekton overwrites each container's entrypoint
// so that step containers effectively execute one at a time, so the pod only
// needs to request the maximum resources needed at any single point in time.
// If no container has an explicit resource request, all requests end up 0.
func zeroNonMaxResourceRequests(c *corev1.Container, stepIndex int, maxIndicesByResource map[corev1.ResourceName]int) {
	if c.Resources.Requests == nil {
		c.Resources.Requests = corev1.ResourceList{}
	}
	for resourceName, holderIdx := range maxIndicesByResource {
		if holderIdx == stepIndex {
			// This step holds the max request for this resource; keep it.
			continue
		}
		c.Resources.Requests[resourceName] = zeroQty
	}
}

// findMaxResourceRequest returns, for each of the given resource names, the
// index of the step whose container requests the largest quantity of that
// resource, or -1 if no step requests a nonzero amount of it.
func findMaxResourceRequest(steps []v1alpha1.Step, resourceNames ...corev1.ResourceName) map[corev1.ResourceName]int {
	winners := make(map[corev1.ResourceName]int, len(resourceNames))
	best := make(map[corev1.ResourceName]resource.Quantity, len(resourceNames))
	for _, rn := range resourceNames {
		winners[rn] = -1
		best[rn] = zeroQty
	}
	for idx, step := range steps {
		for _, rn := range resourceNames {
			req, ok := step.Container.Resources.Requests[rn]
			if !ok {
				continue
			}
			// Only a strictly larger request displaces the current winner.
			if req.Cmp(best[rn]) <= 0 {
				continue
			}
			winners[rn] = idx
			best[rn] = req
		}
	}
	return winners
}
93 changes: 12 additions & 81 deletions pkg/pod/pod_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,6 @@ var (
CredsImage: "override-with-creds:latest",
ShellImage: "busybox",
}
resourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool {
return x.Cmp(y) == 0
})
)

func TestMakePod(t *testing.T) {
Expand Down Expand Up @@ -103,13 +100,7 @@ func TestMakePod(t *testing.T) {
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: workspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("0"),
corev1.ResourceMemory: resource.MustParse("0"),
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
},
},
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
}},
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
},
Expand Down Expand Up @@ -160,13 +151,7 @@ func TestMakePod(t *testing.T) {
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: workspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("0"),
corev1.ResourceMemory: resource.MustParse("0"),
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
},
},
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
}},
Volumes: append(implicitVolumes, secretsVolume, toolsVolume, downwardVolume),
},
Expand Down Expand Up @@ -209,13 +194,7 @@ func TestMakePod(t *testing.T) {
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: workspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("0"),
corev1.ResourceMemory: resource.MustParse("0"),
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
},
},
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
}},
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
SecurityContext: &corev1.PodSecurityContext{
Expand Down Expand Up @@ -254,13 +233,7 @@ func TestMakePod(t *testing.T) {
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: workspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("0"),
corev1.ResourceMemory: resource.MustParse("0"),
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
},
},
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
}},
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
},
Expand Down Expand Up @@ -293,13 +266,7 @@ func TestMakePod(t *testing.T) {
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: workspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("0"),
corev1.ResourceMemory: resource.MustParse("0"),
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
},
},
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
}},
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
},
Expand Down Expand Up @@ -342,13 +309,7 @@ func TestMakePod(t *testing.T) {
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: filepath.Join(workspaceDir, "test"),
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("0"),
corev1.ResourceMemory: resource.MustParse("0"),
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
},
},
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
}},
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
},
Expand Down Expand Up @@ -386,13 +347,7 @@ func TestMakePod(t *testing.T) {
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: workspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("0"),
corev1.ResourceMemory: resource.MustParse("0"),
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
},
},
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
}, {
Name: "sidecar-sc-name",
Image: "sidecar-image",
Expand Down Expand Up @@ -445,13 +400,7 @@ func TestMakePod(t *testing.T) {
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: workspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("8"),
corev1.ResourceMemory: resource.MustParse("0"),
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
},
},
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
}, {
Name: "step-unnamed-1",
Image: "image",
Expand All @@ -470,7 +419,7 @@ func TestMakePod(t *testing.T) {
WorkingDir: workspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("0"),
corev1.ResourceCPU: resource.MustParse("8"),
corev1.ResourceMemory: resource.MustParse("100Gi"),
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
},
Expand Down Expand Up @@ -550,13 +499,7 @@ script-heredoc-randomly-generated-6nl7g
Env: append(implicitEnvVars, corev1.EnvVar{Name: "FOO", Value: "bar"}),
VolumeMounts: append([]corev1.VolumeMount{scriptsVolumeMount, toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: workspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("0"),
corev1.ResourceMemory: resource.MustParse("0"),
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
},
},
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
}, {
Name: "step-two",
Image: "image",
Expand All @@ -573,13 +516,7 @@ script-heredoc-randomly-generated-6nl7g
Env: append(implicitEnvVars, corev1.EnvVar{Name: "FOO", Value: "bar"}),
VolumeMounts: append([]corev1.VolumeMount{{Name: "i-have-a-volume-mount"}, scriptsVolumeMount, toolsMount}, implicitVolumeMounts...),
WorkingDir: workspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("0"),
corev1.ResourceMemory: resource.MustParse("0"),
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
},
},
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
}, {
Name: "step-regular-step",
Image: "image",
Expand All @@ -599,13 +536,7 @@ script-heredoc-randomly-generated-6nl7g
Env: append(implicitEnvVars, corev1.EnvVar{Name: "FOO", Value: "bar"}),
VolumeMounts: append([]corev1.VolumeMount{toolsMount}, implicitVolumeMounts...),
WorkingDir: workspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("0"),
corev1.ResourceMemory: resource.MustParse("0"),
corev1.ResourceEphemeralStorage: resource.MustParse("0"),
},
},
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
}},
Volumes: append(implicitVolumes, scriptsVolume, toolsVolume, downwardVolume),
},
Expand Down
36 changes: 36 additions & 0 deletions pkg/pod/resource_request.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
package pod

import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)

// zeroQty is the zero quantity ("0") used to blank out resource requests.
var zeroQty = resource.MustParse("0")

// allZeroQty returns a fresh ResourceList requesting zero CPU, zero memory,
// and zero ephemeral storage.
func allZeroQty() corev1.ResourceList {
	zeroed := make(corev1.ResourceList, 3)
	for _, name := range []corev1.ResourceName{
		corev1.ResourceCPU,
		corev1.ResourceMemory,
		corev1.ResourceEphemeralStorage,
	} {
		zeroed[name] = zeroQty
	}
	return zeroed
}

// resolveResourceRequests zeroes out the resource requests of all step
// containers except the last, and sets the last container's requests to the
// per-resource maximum across all steps (CPU, memory, ephemeral storage).
// Tekton overwrites entrypoints so step containers effectively run one at a
// time, so the pod only needs to request the maximum required at any single
// point in time. Resource limits are left untouched.
func resolveResourceRequests(containers []corev1.Container) []corev1.Container {
	// Guard against an empty slice; the original indexed containers[len-1]
	// unconditionally, which panics with no containers.
	if len(containers) == 0 {
		return containers
	}

	max := allZeroQty()
	for _, c := range containers {
		for k, v := range c.Resources.Requests {
			if v.Cmp(max[k]) > 0 {
				max[k] = v
			}
		}
	}

	// Set resource requests for all but the last container to zero.
	for i := range containers[:len(containers)-1] {
		containers[i].Resources.Requests = allZeroQty()
	}
	// Set the last container's request to the max of all resources.
	containers[len(containers)-1].Resources.Requests = max
	return containers
}
88 changes: 88 additions & 0 deletions pkg/pod/resource_request_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
package pod

import (
"testing"

"github.com/google/go-cmp/cmp"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)

// resourceQuantityCmp compares resource.Quantity values semantically via
// Cmp (so e.g. equivalent quantities in different notations compare equal)
// instead of by struct equality, for use with cmp.Diff.
var resourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool {
	return x.Cmp(y) == 0
})

// TestResolveResourceRequests checks that resolveResourceRequests zeroes out
// every step's resource requests except the last step's, which receives the
// max request for each resource type, and that limits are never modified.
func TestResolveResourceRequests(t *testing.T) {
	testCases := []struct {
		desc     string
		in, want []corev1.Container
	}{{
		desc: "three steps, no requests",
		in:   []corev1.Container{{}, {}, {}},
		want: []corev1.Container{{
			Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
		}, {
			Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
		}, {
			Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
		}},
	}, {
		desc: "requests are moved, limits aren't changed",
		in: []corev1.Container{{
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU: resource.MustParse("10"),
				},
			},
		}, {
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceMemory: resource.MustParse("10Gi"),
				},
				Limits: corev1.ResourceList{
					corev1.ResourceMemory: resource.MustParse("11Gi"),
				},
			},
		}, {
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
				},
				Limits: corev1.ResourceList{
					corev1.ResourceMemory: resource.MustParse("100Gi"),
				},
			},
		}},
		want: []corev1.Container{{
			// All requests zeroed out.
			Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
		}, {
			// Requests zeroed out; limits remain.
			Resources: corev1.ResourceRequirements{
				Requests: allZeroQty(),
				Limits: corev1.ResourceList{
					corev1.ResourceMemory: resource.MustParse("11Gi"),
				},
			},
		}, {
			// Last step gets the max of every request; limits remain.
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU:              resource.MustParse("10"),
					corev1.ResourceMemory:           resource.MustParse("10Gi"),
					corev1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
				},
				Limits: corev1.ResourceList{
					corev1.ResourceMemory: resource.MustParse("100Gi"),
				},
			},
		}},
	}}
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			got := resolveResourceRequests(tc.in)
			if d := cmp.Diff(tc.want, got, resourceQuantityCmp); d != "" {
				t.Errorf("Diff(-want, +got): %s", d)
			}
		})
	}
}

0 comments on commit 4ca967c

Please sign in to comment.