Use an Informer to list LimitRanges 🥼
Using a LimitRange lister here instead, so this doesn't end up hitting
the real API server on each call.

Taking into account a review: #4176 (comment).

Signed-off-by: Vincent Demeester <[email protected]>
vdemeester committed Sep 16, 2021
1 parent ff860df commit 9108729
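
The change in a nutshell, as a standalone sketch (a hypothetical helper, not code from this commit; "default" is a placeholder namespace, and ctx is assumed to come from knative's injection framework): the old path issued a live List request to the API server on every call, while the lister answers the same query from the informer's in-memory cache.

package example

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/client-go/kubernetes"
    limitrangeinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange"
)

func listBothWays(ctx context.Context, c kubernetes.Interface) error {
    // Before: a network round-trip to the real API server on every call.
    if _, err := c.CoreV1().LimitRanges("default").List(ctx, metav1.ListOptions{}); err != nil {
        return err
    }
    // After: answered from the informer's local cache, no API-server traffic.
    _, err := limitrangeinformer.Get(ctx).Lister().LimitRanges("default").List(labels.Everything())
    return err
}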
Showing 9 changed files with 219 additions and 57 deletions.
internal/builder/v1beta1/pod.go (7 changes: 0 additions & 7 deletions)

@@ -19,7 +19,6 @@ package builder

 import (
     corev1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -124,9 +123,6 @@ func PodContainer(name, image string, ops ...ContainerOp) PodSpecOp {
     c := &corev1.Container{
         Name:  name,
         Image: image,
-        Resources: corev1.ResourceRequirements{
-            Requests: map[corev1.ResourceName]resource.Quantity{},
-        },
     }
     for _, op := range ops {
         op(c)
@@ -143,9 +139,6 @@ func PodInitContainer(name, image string, ops ...ContainerOp) PodSpecOp {
         Name:  name,
         Image: image,
         Args:  []string{},
-        Resources: corev1.ResourceRequirements{
-            Requests: map[corev1.ResourceName]resource.Quantity{},
-        },
     }
     for _, op := range ops {
         op(c)
internal/builder/v1beta1/pod_test.go (3 changes: 0 additions & 3 deletions)

@@ -87,9 +87,6 @@ func TestPod(t *testing.T) {
         Containers: []corev1.Container{{
             Name:  "nop",
             Image: "nop:latest",
-            Resources: corev1.ResourceRequirements{
-                Requests: map[corev1.ResourceName]resource.Quantity{},
-            },
         }},
         InitContainers: []corev1.Container{{
             Name: "basic",
pkg/internal/limitrange/limitrange.go (21 changes: 12 additions & 9 deletions)

@@ -20,23 +20,25 @@ import (

     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/client-go/kubernetes"
+    limitrangeinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange"
 )

 func getVirtualLimitRange(ctx context.Context, namespace string, c kubernetes.Interface) (*corev1.LimitRange, error) {
-    limitRanges, err := c.CoreV1().LimitRanges(namespace).List(ctx, metav1.ListOptions{})
+    limitrangeInformer := limitrangeinformer.Get(ctx)
+    limitRanges, err := limitrangeInformer.Lister().LimitRanges(namespace).List(labels.Everything())
     if err != nil {
         return nil, err
     }
-    var limitRange corev1.LimitRange
+    var limitRange *corev1.LimitRange
     switch {
-    case len(limitRanges.Items) == 0:
+    case len(limitRanges) == 0:
         // No LimitRange defined
         break
-    case len(limitRanges.Items) == 1:
+    case len(limitRanges) == 1:
         // One LimitRange defined
-        limitRange = limitRanges.Items[0]
+        limitRange = limitRanges[0]
     default:
         // Several LimitRange defined
         // Create a virtual LimitRange with
@@ -45,8 +47,9 @@ func getVirtualLimitRange(ctx context.Context, namespace string, c kubernetes.In
         // - Default that "fits" into min/max taken above
         // - Default request that "fits" into min/max taken above
         // - Smallest ratio (aka the most restrictive one)
+        limitRange = &corev1.LimitRange{}
         m := map[corev1.LimitType]corev1.LimitRangeItem{}
-        for _, lr := range limitRanges.Items {
+        for _, lr := range limitRanges {
             for _, item := range lr.Spec.Limits {
                 _, exists := m[item.Type]
                 if !exists {
@@ -74,7 +77,7 @@ func getVirtualLimitRange(ctx context.Context, namespace string, c kubernetes.In
             }
         }
         // Handle Default and DefaultRequest
-        for _, lr := range limitRanges.Items {
+        for _, lr := range limitRanges {
             for _, item := range lr.Spec.Limits {
                 // Default
                 m[item.Type].Default[corev1.ResourceCPU] = minOfBetween(m[item.Type].Default[corev1.ResourceCPU], item.Default[corev1.ResourceCPU], m[item.Type].Min[corev1.ResourceCPU], m[item.Type].Max[corev1.ResourceCPU])
@@ -90,7 +93,7 @@ func getVirtualLimitRange(ctx context.Context, namespace string, c kubernetes.In
             limitRange.Spec.Limits = append(limitRange.Spec.Limits, v)
         }
     }
-    return &limitRange, nil
+    return limitRange, nil
 }

 func maxOf(a, b resource.Quantity) resource.Quantity {
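
The maxOf body is collapsed in the view above; here is a plausible sketch of such a helper (assuming the straightforward implementation, since only the signature is visible here). When merging several LimitRanges, the virtual LimitRange presumably keeps the maximum of the Min values, i.e. the most restrictive lower bound:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

// maxOf returns the larger of two quantities; a guess at the collapsed
// helper above, which the virtual LimitRange would use when merging the
// Min values of several LimitRanges.
func maxOf(a, b resource.Quantity) resource.Quantity {
    if a.Cmp(b) > 0 {
        return a
    }
    return b
}

func main() {
    min1 := resource.MustParse("100m") // min CPU from a first LimitRange
    min2 := resource.MustParse("250m") // min CPU from a second LimitRange
    // The tighter lower bound wins: prints "250m".
    fmt.Println(maxOf(min1, min2).String())
}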
pkg/internal/limitrange/transformer_test.go (67 changes: 48 additions & 19 deletions)

@@ -20,12 +20,16 @@ import (
     "testing"

     "github.com/google/go-cmp/cmp"
+    ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
+    "github.com/tektoncd/pipeline/test"
     "github.com/tektoncd/pipeline/test/diff"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/runtime"
-    fakek8s "k8s.io/client-go/kubernetes/fake"
+    "k8s.io/client-go/kubernetes"
+    fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake"
+    fakelimitrangeinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/fake"
+    fakeserviceaccountinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount/fake"
 )

 var resourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool {
@@ -405,15 +409,16 @@ func TestTransformerOneContainer(t *testing.T) {
         },
     }} {
         t.Run(tc.description, func(t *testing.T) {
-            kubeclient := fakek8s.NewSimpleClientset(
-                &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}},
-                &corev1.LimitRange{ObjectMeta: metav1.ObjectMeta{Name: "limitrange", Namespace: "default"},
+            ctx, kubeclient, cancel := setup(t,
+                []corev1.ServiceAccount{{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}},
+                []corev1.LimitRange{{ObjectMeta: metav1.ObjectMeta{Name: "limitrange", Namespace: "default"},
                     Spec: corev1.LimitRangeSpec{
                         Limits: tc.limitranges,
                     },
-                },
+                }},
             )
-            f := NewTransformer(context.Background(), "default", kubeclient)
+            defer cancel()
+            f := NewTransformer(ctx, "default", kubeclient)
             got, err := f(&corev1.Pod{
                 Spec: tc.podspec,
             })
@@ -817,15 +822,16 @@ func TestTransformerMultipleContainer(t *testing.T) {
         },
     }} {
         t.Run(tc.description, func(t *testing.T) {
-            kubeclient := fakek8s.NewSimpleClientset(
-                &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}},
-                &corev1.LimitRange{ObjectMeta: metav1.ObjectMeta{Name: "limitrange", Namespace: "default"},
+            ctx, kubeclient, cancel := setup(t,
+                []corev1.ServiceAccount{{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}},
+                []corev1.LimitRange{{ObjectMeta: metav1.ObjectMeta{Name: "limitrange", Namespace: "default"},
                     Spec: corev1.LimitRangeSpec{
                         Limits: tc.limitranges,
                     },
-                },
+                }},
             )
-            f := NewTransformer(context.Background(), "default", kubeclient)
+            defer cancel()
+            f := NewTransformer(ctx, "default", kubeclient)
             got, err := f(&corev1.Pod{
                 Spec: tc.podspec,
             })
@@ -943,13 +949,12 @@ func TestTransformerOneContainerMultipleLimitRange(t *testing.T) {
         },
     }} {
         t.Run(tc.description, func(t *testing.T) {
-            runtimeObjects := []runtime.Object{&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}}
-            for _, l := range tc.limitranges {
-                l := l // because we use pointer, we need to descope this...
-                runtimeObjects = append(runtimeObjects, &l)
-            }
-            kubeclient := fakek8s.NewSimpleClientset(runtimeObjects...)
-            f := NewTransformer(context.Background(), "default", kubeclient)
+            ctx, kubeclient, cancel := setup(t,
+                []corev1.ServiceAccount{{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}},
+                tc.limitranges,
+            )
+            defer cancel()
+            f := NewTransformer(ctx, "default", kubeclient)
             got, err := f(&corev1.Pod{
                 Spec: tc.podspec,
             })
@@ -985,3 +990,27 @@ func cmpRequestsAndLimits(t *testing.T, want, got corev1.PodSpec) {
         }
     }
 }
+
+func setup(t *testing.T, serviceaccounts []corev1.ServiceAccount, limitranges []corev1.LimitRange) (context.Context, kubernetes.Interface, func()) {
+    ctx, _ := ttesting.SetupFakeContext(t)
+    ctx, cancel := context.WithCancel(ctx)
+    kubeclient := fakekubeclient.Get(ctx)
+    // LimitRange
+    limitRangeInformer := fakelimitrangeinformer.Get(ctx)
+    kubeclient.PrependReactor("*", "limitranges", test.AddToInformer(t, limitRangeInformer.Informer().GetIndexer()))
+    for _, tl := range limitranges {
+        if _, err := kubeclient.CoreV1().LimitRanges(tl.Namespace).Create(ctx, &tl, metav1.CreateOptions{}); err != nil {
+            t.Fatal(err)
+        }
+    }
+    // ServiceAccount
+    serviceAccountInformer := fakeserviceaccountinformer.Get(ctx)
+    kubeclient.PrependReactor("*", "serviceaccounts", test.AddToInformer(t, serviceAccountInformer.Informer().GetIndexer()))
+    for _, ts := range serviceaccounts {
+        if _, err := kubeclient.CoreV1().ServiceAccounts(ts.Namespace).Create(ctx, &ts, metav1.CreateOptions{}); err != nil {
+            t.Fatal(err)
+        }
+    }
+    kubeclient.ClearActions()
+    return ctx, kubeclient, cancel
+}
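
Why the PrependReactor calls in setup: the fake informers are never started against an API server, so objects created through the fake client would otherwise never reach the lister's cache. test.AddToInformer bridges that gap by mirroring each action into the informer's indexer. A simplified sketch of the general shape of such a reactor (an assumed shape, handling only creates; the real helper presumably also covers updates and deletes):

package testhelpers

import (
    "testing"

    "k8s.io/apimachinery/pkg/runtime"
    clientgotesting "k8s.io/client-go/testing"
    "k8s.io/client-go/tools/cache"
)

// addCreatesToIndexer returns a reactor that copies every object created
// through the fake clientset into the informer's indexer, so that
// Lister() reads can see it without the informer ever being started.
func addCreatesToIndexer(t *testing.T, indexer cache.Indexer) clientgotesting.ReactionFunc {
    return func(action clientgotesting.Action) (bool, runtime.Object, error) {
        if create, ok := action.(clientgotesting.CreateAction); ok {
            if err := indexer.Add(create.GetObject()); err != nil {
                t.Fatal(err)
            }
        }
        // handled=false lets the fake clientset process the action as usual.
        return false, nil, nil
    }
}

The kubeclient.ClearActions() call at the end of setup then discards the create actions recorded while seeding, so tests that assert on the fake client's recorded actions start from a clean slate.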
pkg/reconciler/taskrun/taskrun_test.go (28 changes: 9 additions & 19 deletions)

@@ -1814,7 +1814,7 @@ func TestReconcile_SetsStartTime(t *testing.T) {
         t.Fatal(err)
     }

-    if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err == nil {
+    if err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err == nil {
         t.Error("Wanted a wrapped requeue error, but got nil.")
     } else if ok, _ := controller.IsRequeueKey(err); !ok {
         t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
@@ -1851,7 +1851,7 @@ func TestReconcile_DoesntChangeStartTime(t *testing.T) {
     testAssets, cancel := getTaskRunController(t, d)
     defer cancel()

-    if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
+    if err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err != nil {
         t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
     }

@@ -1964,7 +1964,7 @@ func TestReconcileTaskRunWithPermanentError(t *testing.T) {
     defer cancel()
     c := testAssets.Controller
     clients := testAssets.Clients
-    reconcileErr := c.Reconciler.Reconcile(context.Background(), getRunName(noTaskRun))
+    reconcileErr := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(noTaskRun))

     // When a TaskRun was rejected with a permanent error, reconciler must stop and forget about the run
     // Such TaskRun enters Reconciler and from within the isDone block, marks the run success so that
@@ -2392,9 +2392,6 @@ func TestExpandMountPath(t *testing.T) {
     testAssets, cancel := getTaskRunController(t, d)
     defer cancel()

-    ctx, cancel := context.WithCancel(context.TODO())
-    defer cancel()
-
     // c := testAssets.Controller
     clients := testAssets.Clients
     saName := "default"
@@ -2427,7 +2424,7 @@ func TestExpandMountPath(t *testing.T) {
         TaskSpec: &v1beta1.TaskSpec{Steps: simpleTask.Spec.Steps, Workspaces: simpleTask.Spec.Workspaces},
     }

-    pod, err := r.createPod(ctx, taskRun, rtr)
+    pod, err := r.createPod(testAssets.Ctx, taskRun, rtr)

     if err != nil {
         t.Fatalf("create pod threw error %v", err)
@@ -2486,10 +2483,6 @@ func TestExpandMountPath_DuplicatePaths(t *testing.T) {

     testAssets, cancel := getTaskRunController(t, d)
     defer cancel()
-
-    ctx, cancel := context.WithCancel(context.TODO())
-    defer cancel()
-
     clients := testAssets.Clients
     saName := "default"
     if _, err := clients.Kube.CoreV1().ServiceAccounts(taskRun.Namespace).Create(testAssets.Ctx, &corev1.ServiceAccount{
@@ -2520,7 +2513,7 @@ func TestExpandMountPath_DuplicatePaths(t *testing.T) {
         TaskSpec: &v1beta1.TaskSpec{Steps: simpleTask.Spec.Steps, Workspaces: simpleTask.Spec.Workspaces},
     }

-    _, err := r.createPod(ctx, taskRun, rtr)
+    _, err := r.createPod(testAssets.Ctx, taskRun, rtr)

     if err == nil || err.Error() != expectedError {
         t.Errorf("Expected to fail validation for duplicate Workspace mount paths, error was %v", err)
@@ -2544,9 +2537,6 @@ func TestHandlePodCreationError(t *testing.T) {
     testAssets, cancel := getTaskRunController(t, d)
     defer cancel()

-    ctx, cancel := context.WithCancel(context.TODO())
-    defer cancel()
-
     // Use the test assets to create a *Reconciler directly for focused testing.
     c := &Reconciler{
         KubeClientSet: testAssets.Clients.Kube,
@@ -2588,7 +2578,7 @@ func TestHandlePodCreationError(t *testing.T) {
     }}
     for _, tc := range testcases {
         t.Run(tc.description, func(t *testing.T) {
-            c.handlePodCreationError(ctx, taskRun, tc.err)
+            c.handlePodCreationError(testAssets.Ctx, taskRun, tc.err)
             foundCondition := false
             for _, cond := range taskRun.Status.Conditions {
                 if cond.Type == tc.expectedType && cond.Status == tc.expectedStatus && cond.Reason == tc.expectedReason {
@@ -3112,7 +3102,7 @@ func TestReconcileValidDefaultWorkspaceOmittedOptionalWorkspace(t *testing.T) {
         t.Fatal(err)
     }

-    if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRunOmittingWorkspace)); err == nil {
+    if err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRunOmittingWorkspace)); err == nil {
         t.Error("Wanted a wrapped requeue error, but got nil.")
     } else if ok, _ := controller.IsRequeueKey(err); !ok {
         t.Errorf("Unexpected reconcile error for TaskRun %q: %v", taskRunOmittingWorkspace.Name, err)
@@ -3280,7 +3270,7 @@ func TestReconcileWithWorkspacesIncompatibleWithAffinityAssistant(t *testing.T)
         t.Fatal(err)
     }

-    _ = testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun))
+    _ = testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun))

     _, err := clients.Pipeline.TektonV1beta1().Tasks(taskRun.Namespace).Get(testAssets.Ctx, taskWithTwoWorkspaces.Name, metav1.GetOptions{})
     if err != nil {
@@ -3343,7 +3333,7 @@ func TestReconcileWorkspaceWithVolumeClaimTemplate(t *testing.T) {
         t.Fatal(err)
     }

-    if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err == nil {
+    if err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err == nil {
         t.Error("Wanted a wrapped requeue error, but got nil.")
     } else if ok, _ := controller.IsRequeueKey(err); !ok {
         t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
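
A note on why these swaps to testAssets.Ctx are needed: getVirtualLimitRange now resolves its lister with limitrangeinformer.Get(ctx), and that lookup only succeeds on a context built by the injection framework. A bare context.Background() carries no injected informer, so reconciles that reach the LimitRange lookup would fail on it. A minimal illustration (a hypothetical test, not part of the commit):

package example

import (
    "testing"

    ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
    limitrangeinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange"
    // init() in this package registers the fake LimitRange informer;
    // this is the same blank-import trick test/controller.go uses below.
    _ "knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/fake"
)

func TestInjectedContextCarriesInformer(t *testing.T) {
    // A context built by the injection test helpers carries the fake
    // LimitRange informer, so Get succeeds...
    ctx, _ := ttesting.SetupFakeContext(t)
    _ = limitrangeinformer.Get(ctx)
    // ...whereas limitrangeinformer.Get(context.Background()) would find
    // nothing registered, which is why the tests above pass testAssets.Ctx.
}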
test/controller.go (2 changes: 2 additions & 0 deletions)

@@ -57,6 +57,8 @@ import (
     fakefilteredpodinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/pod/filtered/fake"
     fakeserviceaccountinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount/fake"
     "knative.dev/pkg/controller"
+
+    _ "knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/fake" // force register limitrange fake informers
 )

 // Data represents the desired state of the system (i.e. existing resources) to seed controllers

Some generated files are not rendered by default.
