check for limitrange minimum for taskrun container requests
danielhelfand committed Feb 3, 2020
1 parent ad6610e commit 482085d
Showing 6 changed files with 239 additions and 7 deletions.
2 changes: 1 addition & 1 deletion config/200-clusterrole.yaml
@@ -4,7 +4,7 @@ metadata:
name: tekton-pipelines-admin
rules:
- apiGroups: [""]
resources: ["pods", "pods/log", "namespaces", "secrets", "events", "serviceaccounts", "configmaps", "persistentvolumeclaims"]
resources: ["pods", "pods/log", "namespaces", "secrets", "events", "serviceaccounts", "configmaps", "persistentvolumeclaims", "limitranges"]
verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
- apiGroups: ["apps"]
resources: ["deployments"]
45 changes: 45 additions & 0 deletions docs/tekton-limitrange-config.md
@@ -0,0 +1,45 @@
# Tekton LimitRange Config

This document describes how to create a `tekton-limitrange-config` ConfigMap in the event
you would like to apply a LimitRange minimum to container requests instead of Tekton's
default behavior.

## How Tekton Handles Container Requests for a TaskRun

To request the minimum amount of resources needed to support the containers for the Steps
in a TaskRun, Tekton only requests the maximum values for CPU, memory, and ephemeral storage
across those Steps. Only the maximum request values are needed because Steps execute one at a
time in the TaskRun's pod. As a result, all requests that are not the maximum values are set to zero.
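
As an illustration of this behavior, here is a minimal sketch of a Task with two Steps; the Task
name, image, and request values are made up for the example and are not part of this commit:

```yaml
apiVersion: tekton.dev/v1alpha1
kind: Task
metadata:
  name: example-task            # hypothetical name
spec:
  steps:
    - name: step-one
      image: ubuntu             # illustrative image
      resources:
        requests:
          cpu: "1"              # not the max CPU request across Steps -> set to zero
          memory: 2Gi           # max memory request across Steps -> kept
    - name: step-two
      image: ubuntu
      resources:
        requests:
          cpu: "2"              # max CPU request across Steps -> kept
          memory: 1Gi           # not the max memory request across Steps -> set to zero
```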

## tekton-limitrange-config Example

The [behavior above](#how-tekton-handles-container-requests-for-a-taskrun) is how Tekton carries out
TaskRuns by default, but if you are working in a namespace that has a LimitRange minimum defined for
container requests, you can create a ConfigMap named `tekton-limitrange-config` that holds the name
of that LimitRange in the namespace where your TaskRuns will run.

An example `tekton-limitrange-config` is shown below:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: tekton-limitrange-config
data:
  default-limitrange-name: "limit-mem-cpu-per-container"
```

In the example above, `tekton-limitrange-config` would apply the container request minimum from
a LimitRange named `limit-mem-cpu-per-container`. Instead of requesting a zero quantity for all
non-maximum container request values of a TaskRun, the container request minimums from
`limit-mem-cpu-per-container` would be used.
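
For reference, a LimitRange along the following lines would supply the container request minimums
that Tekton reads. The LimitRange itself is not part of this commit; the values below simply mirror
those used in the tests:

```yaml
apiVersion: v1
kind: LimitRange
metadata:
  name: limit-mem-cpu-per-container
spec:
  limits:
    - type: Container
      min:
        cpu: 100m
        memory: 99Mi
        ephemeral-storage: 100m
```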

## Creating the tekton-limitrange-config

To use the `tekton-limitrange-config`, create a ConfigMap named `tekton-limitrange-config` with a
key named `default-limitrange-name` whose value is the LimitRange name. The ConfigMap must be
created in the namespace where your TaskRuns will run.

After defining the `tekton-limitrange-config` ConfigMap in a file, use `kubectl apply -f` to make the
LimitRange name available to Tekton.
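
For example, assuming the ConfigMap above is saved to a file named `tekton-limitrange-config.yaml`
(the filename and namespace here are illustrative):

```shell
kubectl apply -f tekton-limitrange-config.yaml --namespace my-taskrun-namespace
```
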
2 changes: 2 additions & 0 deletions go.mod
@@ -4,6 +4,7 @@ go 1.13

require (
cloud.google.com/go v0.47.0 // indirect
cloud.google.com/go/storage v1.0.0
contrib.go.opencensus.io/exporter/prometheus v0.1.0 // indirect
contrib.go.opencensus.io/exporter/stackdriver v0.12.8 // indirect
github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher v0.0.0-20191203181535-308b93ad1f39
@@ -40,6 +41,7 @@ require (
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
google.golang.org/api v0.10.0
google.golang.org/appengine v1.6.5 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/yaml.v2 v2.2.5 // indirect
20 changes: 19 additions & 1 deletion pkg/pod/pod.go
@@ -119,8 +119,26 @@ func MakePod(images pipeline.Images, taskRun *v1alpha1.TaskRun, taskSpec v1alpha
initContainers = append(initContainers, entrypointInit)
volumes = append(volumes, toolsVolume, downwardVolume)

// Get LimitRange by name if present in ConfigMap tekton-limitrange-config.
// Otherwise, pass empty request to resolveResourceRequests.
limitRangeName := ""
limitRangeConfig, err := kubeclient.CoreV1().ConfigMaps(taskRun.Namespace).Get("tekton-limitrange-config", metav1.GetOptions{})
if err == nil {
limitRangeName = limitRangeConfig.Data["default-limitrange-name"]
if limitRangeName == "" {
return nil, fmt.Errorf("tekton-limitrange-config is present but default-limitrange-name key is missing")
}
}
var limitRange *corev1.LimitRange
if limitRangeName != "" {
limitRange, err = kubeclient.CoreV1().LimitRanges(taskRun.Namespace).Get(limitRangeName, metav1.GetOptions{})
if err != nil {
return nil, err
}
}

// Zero out non-max resource requests.
stepContainers = resolveResourceRequests(stepContainers)
stepContainers = resolveResourceRequests(stepContainers, limitRange)

// Add implicit env vars.
// They're prepended to the list, so that if the user specified any
34 changes: 31 additions & 3 deletions pkg/pod/resource_request.go
@@ -21,6 +21,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
)

var emptyResourceQuantity = resource.Quantity{}
var zeroQty = resource.MustParse("0")

func allZeroQty() corev1.ResourceList {
@@ -31,7 +32,7 @@ func allZeroQty() corev1.ResourceList {
}
}

func resolveResourceRequests(containers []corev1.Container) []corev1.Container {
func resolveResourceRequests(containers []corev1.Container, limitRange *corev1.LimitRange) []corev1.Container {
max := allZeroQty()
resourceNames := []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory, corev1.ResourceEphemeralStorage}
maxIndicesByResource := make(map[corev1.ResourceName]int, len(resourceNames))
@@ -50,16 +51,43 @@ func resolveResourceRequests(containers []corev1.Container) []corev1.Container {
}
}

// Get limitrange minimum for container requests so they won't
// be zeroed out if minimum is specified in namespace
var limitRangeItems []corev1.LimitRangeItem
if limitRange != nil {
limitRangeItems = limitRange.Spec.Limits
}
min := allZeroQty()
for _, limitRangeItem := range limitRangeItems {
if limitRangeItem.Type == corev1.LimitTypeContainer {
if limitRangeItem.Min != nil {
min = limitRangeItem.Min
}
break
}
}

// Use zeroQty if request value is not set for min
if min[corev1.ResourceCPU] == emptyResourceQuantity {
min[corev1.ResourceCPU] = zeroQty
}
if min[corev1.ResourceMemory] == emptyResourceQuantity {
min[corev1.ResourceMemory] = zeroQty
}
if min[corev1.ResourceEphemeralStorage] == emptyResourceQuantity {
min[corev1.ResourceEphemeralStorage] = zeroQty
}

// Set all non max resource requests to 0. Leave max request at index
// originally defined to account for limit of step.
for i := range containers {
if containers[i].Resources.Requests == nil {
containers[i].Resources.Requests = allZeroQty()
containers[i].Resources.Requests = min
continue
}
for _, resourceName := range resourceNames {
if maxIndicesByResource[resourceName] != i {
containers[i].Resources.Requests[resourceName] = zeroQty
containers[i].Resources.Requests[resourceName] = min[resourceName]
}
}
}
143 changes: 141 additions & 2 deletions pkg/pod/resource_request_test.go
@@ -28,7 +28,7 @@ var resourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool {
return x.Cmp(y) == 0
})

func TestResolveResourceRequests(t *testing.T) {
func TestResolveResourceRequests_No_LimitRange(t *testing.T) {
for _, c := range []struct {
desc string
in, want []corev1.Container
@@ -206,7 +206,146 @@ func TestResolveResourceRequests(t *testing.T) {
},
} {
t.Run(c.desc, func(t *testing.T) {
got := resolveResourceRequests(c.in)
got := resolveResourceRequests(c.in, nil)
if d := cmp.Diff(c.want, got, resourceQuantityCmp); d != "" {
t.Errorf("Diff(-want, +got): %s", d)
}
})
}
}

func TestResolveResourceRequests_LimitRange(t *testing.T) {
for _, c := range []struct {
desc string
in, want []corev1.Container
}{{
desc: "three steps, no requests, apply minimum to all",
in: []corev1.Container{{}, {}, {}},
want: []corev1.Container{{
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("100m"),
corev1.ResourceMemory: resource.MustParse("99Mi"),
corev1.ResourceEphemeralStorage: resource.MustParse("100m"),
},
},
}, {
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("100m"),
corev1.ResourceMemory: resource.MustParse("99Mi"),
corev1.ResourceEphemeralStorage: resource.MustParse("100m"),
},
},
}, {
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("100m"),
corev1.ResourceMemory: resource.MustParse("99Mi"),
corev1.ResourceEphemeralStorage: resource.MustParse("100m"),
},
},
}},
}, {
desc: "three steps, no requests, apply minimum values when not max values",
in: []corev1.Container{{
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("10"),
},
},
}, {
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("10Gi"),
},
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("11Gi"),
},
},
}, {
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
},
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("100Gi"),
},
},
}},
want: []corev1.Container{{
// ResourceCPU max request
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("10"),
corev1.ResourceMemory: resource.MustParse("99Mi"),
corev1.ResourceEphemeralStorage: resource.MustParse("100m"),
},
},
}, {
// ResourceMemory max request
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("100m"),
corev1.ResourceMemory: resource.MustParse("10Gi"),
corev1.ResourceEphemeralStorage: resource.MustParse("100m"),
},
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("11Gi"),
},
},
}, {
// ResourceEphemeralStorage max request
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("100m"),
corev1.ResourceMemory: resource.MustParse("99Mi"),
corev1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
},
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("100Gi"),
},
},
}},
}, {
desc: "Only one step container with all request values filled out, no min values",
in: []corev1.Container{{
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("10"),
corev1.ResourceMemory: resource.MustParse("10Gi"),
corev1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
},
},
}},
want: []corev1.Container{{
// All max values set
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("10"),
corev1.ResourceMemory: resource.MustParse("10Gi"),
corev1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
},
},
}},
},
} {
t.Run(c.desc, func(t *testing.T) {
got := resolveResourceRequests(c.in,
&corev1.LimitRange{
Spec: corev1.LimitRangeSpec{
Limits: []corev1.LimitRangeItem{
{
Type: "Container",
Min: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("100m"),
corev1.ResourceMemory: resource.MustParse("99Mi"),
corev1.ResourceEphemeralStorage: resource.MustParse("100m"),
},
},
},
},
})
if d := cmp.Diff(c.want, got, resourceQuantityCmp); d != "" {
t.Errorf("Diff(-want, +got): %s", d)
}
