Merge pull request #2141 from bertinatto/bump-v1.31.3
WRKLDS-1449: Update to Kubernetes v1.31.3
openshift-merge-bot[bot] authored Nov 26, 2024
2 parents e3abfef + 247b4a9 commit 8ac36bf
Showing 14 changed files with 1,385 additions and 968 deletions.
222 changes: 173 additions & 49 deletions CHANGELOG/CHANGELOG-1.31.md

Large diffs are not rendered by default.

26 changes: 26 additions & 0 deletions cmd/kube-controller-manager/app/controllermanager_test.go
@@ -220,3 +220,29 @@ func TestTaintEvictionControllerGating(t *testing.T) {
})
}
}

func TestNoCloudProviderControllerStarted(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()

controllerCtx := ControllerContext{
Cloud: nil,
LoopMode: IncludeCloudLoops,
}
controllerCtx.ComponentConfig.Generic.Controllers = []string{"*"}
for _, controller := range NewControllerDescriptors() {
if !controller.IsCloudProviderController() {
continue
}

controllerName := controller.Name()
checker, err := StartController(ctx, controllerCtx, controller, nil)
if err != nil {
t.Errorf("Error starting controller %q: %v", controllerName, err)
}
if checker != nil {
t.Errorf("Controller %q should not be started", controllerName)
}
}
}
13 changes: 12 additions & 1 deletion cmd/kube-controller-manager/app/core.go
@@ -92,6 +92,12 @@ func newServiceLBControllerDescriptor() *ControllerDescriptor {
}

func startServiceLBController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
logger := klog.FromContext(ctx)
if controllerContext.Cloud == nil {
logger.Info("Warning: service-controller is set, but no cloud provider specified. Will not configure service controller.")
return nil, false, nil
}

serviceController, err := servicecontroller.New(
controllerContext.Cloud,
controllerContext.ClientBuilder.ClientOrDie("service-controller"),
@@ -102,7 +108,7 @@ func startServiceLBController(ctx context.Context, controllerContext ControllerC
)
if err != nil {
// This error shouldn't fail. It lives like this as a legacy.
klog.FromContext(ctx).Error(err, "Failed to start service controller")
logger.Error(err, "Failed to start service controller.")
return nil, false, nil
}
go serviceController.Run(ctx, int(controllerContext.ComponentConfig.ServiceController.ConcurrentServiceSyncs), controllerContext.ControllerManagerMetrics)
@@ -261,6 +267,11 @@ func newCloudNodeLifecycleControllerDescriptor() *ControllerDescriptor {

func startCloudNodeLifecycleController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
logger := klog.FromContext(ctx)
if controllerContext.Cloud == nil {
logger.Info("Warning: node-controller is set, but no cloud provider specified. Will not configure node lifecyle controller.")
return nil, false, nil
}

cloudNodeLifecycleController, err := cloudnodelifecyclecontroller.NewCloudNodeLifecycleController(
controllerContext.InformerFactory.Core().V1().Nodes(),
// cloud node lifecycle controller uses existing cluster role from node-controller
2 changes: 1 addition & 1 deletion openshift-hack/images/hyperkube/Dockerfile.rhel
@@ -14,4 +14,4 @@ COPY --from=builder /tmp/build/* /usr/bin/
LABEL io.k8s.display-name="OpenShift Kubernetes Server Commands" \
io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." \
io.openshift.tags="openshift,hyperkube" \
io.openshift.build.versions="kubernetes=1.31.2"
io.openshift.build.versions="kubernetes=1.31.3"
@@ -50,7 +50,9 @@ func RunWithLeaderElection(ctx context.Context, config *rest.Config, newRunnerFn
run(ctx, 1)
},
OnStoppedLeading: func() {
cancel()
if cancel != nil {
cancel()
}
},
}

@@ -191,6 +191,8 @@ func (pl *DefaultPreemption) SelectVictimsOnNode(
}
var victims []*v1.Pod
numViolatingVictim := 0
// Sort potentialVictims by pod priority from high to low, which ensures that
// higher-priority pods are reprieved first.
sort.Slice(potentialVictims, func(i, j int) bool { return util.MoreImportantPod(potentialVictims[i].Pod, potentialVictims[j].Pod) })
// Try to reprieve as many pods as possible. We first try to reprieve the PDB
// violating victims and then other non-violating ones. In both cases, we start
@@ -225,6 +227,11 @@ func (pl *DefaultPreemption) SelectVictimsOnNode(
return nil, 0, framework.AsStatus(err)
}
}

// Sort victims after reprieving pods so that the victims remain ordered by priority from high to low.
if len(violatingVictims) != 0 && len(nonViolatingVictims) != 0 {
sort.Slice(victims, func(i, j int) bool { return util.MoreImportantPod(victims[i], victims[j]) })
}
return victims, numViolatingVictim, framework.NewStatus(framework.Success)
}

@@ -21,6 +21,7 @@ import (
"encoding/json"
"errors"
"fmt"
"math"
"math/rand"
"sort"
"strings"
@@ -142,13 +143,20 @@ func (pl *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, p
return nil
}

const (
LabelKeyIsViolatingPDB = "test.kubernetes.io/is-violating-pdb"
LabelValueViolatingPDB = "violating"
LabelValueNonViolatingPDB = "non-violating"
)

func TestPostFilter(t *testing.T) {
onePodRes := map[v1.ResourceName]string{v1.ResourcePods: "1"}
nodeRes := map[v1.ResourceName]string{v1.ResourceCPU: "200m", v1.ResourceMemory: "400"}
tests := []struct {
name string
pod *v1.Pod
pods []*v1.Pod
pdbs []*policy.PodDisruptionBudget
nodes []*v1.Node
filteredNodesStatuses framework.NodeToStatusMap
extender framework.Extender
@@ -218,6 +226,29 @@ func TestPostFilter(t *testing.T) {
wantResult: framework.NewPostFilterResultWithNominatedNode("node2"),
wantStatus: framework.NewStatus(framework.Success),
},
{
name: "pod can be made schedulable on minHighestPriority node",
pod: st.MakePod().Name("p").UID("p").Namespace(v1.NamespaceDefault).Priority(veryHighPriority).Obj(),
pods: []*v1.Pod{
st.MakePod().Name("p1").UID("p1").Label(LabelKeyIsViolatingPDB, LabelValueNonViolatingPDB).Namespace(v1.NamespaceDefault).Priority(highPriority).Node("node1").Obj(),
st.MakePod().Name("p2").UID("p2").Label(LabelKeyIsViolatingPDB, LabelValueViolatingPDB).Namespace(v1.NamespaceDefault).Priority(lowPriority).Node("node1").Obj(),
st.MakePod().Name("p3").UID("p3").Label(LabelKeyIsViolatingPDB, LabelValueViolatingPDB).Namespace(v1.NamespaceDefault).Priority(midPriority).Node("node2").Obj(),
},
pdbs: []*policy.PodDisruptionBudget{
st.MakePDB().Name("violating-pdb").Namespace(v1.NamespaceDefault).MatchLabel(LabelKeyIsViolatingPDB, LabelValueViolatingPDB).MinAvailable("100%").Obj(),
st.MakePDB().Name("non-violating-pdb").Namespace(v1.NamespaceDefault).MatchLabel(LabelKeyIsViolatingPDB, LabelValueNonViolatingPDB).MinAvailable("0").DisruptionsAllowed(math.MaxInt32).Obj(),
},
nodes: []*v1.Node{
st.MakeNode().Name("node1").Capacity(onePodRes).Obj(),
st.MakeNode().Name("node2").Capacity(onePodRes).Obj(),
},
filteredNodesStatuses: framework.NodeToStatusMap{
"node1": framework.NewStatus(framework.Unschedulable),
"node2": framework.NewStatus(framework.Unschedulable),
},
wantResult: framework.NewPostFilterResultWithNominatedNode("node2"),
wantStatus: framework.NewStatus(framework.Success),
},
{
name: "preemption result filtered out by extenders",
pod: st.MakePod().Name("p").UID("p").Namespace(v1.NamespaceDefault).Priority(highPriority).Obj(),
@@ -347,6 +378,13 @@ func TestPostFilter(t *testing.T) {
for i := range tt.pods {
podInformer.GetStore().Add(tt.pods[i])
}
pdbInformer := informerFactory.Policy().V1().PodDisruptionBudgets().Informer()
for i := range tt.pdbs {
if err := pdbInformer.GetStore().Add(tt.pdbs[i]); err != nil {
t.Fatal(err)
}
}

// Register NodeResourceFit as the Filter & PreFilter plugin.
registeredPlugins := []tf.RegisterPluginFunc{
tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
60 changes: 60 additions & 0 deletions pkg/scheduler/testing/wrappers.go
@@ -21,11 +21,13 @@ import (
"time"

v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/ptr"
)
@@ -212,6 +214,64 @@ func (c *ContainerWrapper) ResourceLimits(limMap map[v1.ResourceName]string) *Co
return c
}

// PodDisruptionBudgetWrapper wraps a PodDisruptionBudget inside.
type PodDisruptionBudgetWrapper struct {
policy.PodDisruptionBudget
}

// MakePDB creates a PodDisruptionBudget wrapper.
func MakePDB() *PodDisruptionBudgetWrapper {
return &PodDisruptionBudgetWrapper{policy.PodDisruptionBudget{}}
}

// Obj returns the inner PodDisruptionBudget.
func (p *PodDisruptionBudgetWrapper) Obj() *policy.PodDisruptionBudget {
return &p.PodDisruptionBudget
}

// Name sets `name` as the name of the inner PodDisruptionBudget.
func (p *PodDisruptionBudgetWrapper) Name(name string) *PodDisruptionBudgetWrapper {
p.SetName(name)
return p
}

// Namespace sets `namespace` as the namespace of the inner PodDisruptionBudget.
func (p *PodDisruptionBudgetWrapper) Namespace(namespace string) *PodDisruptionBudgetWrapper {
p.SetNamespace(namespace)
return p
}

// MinAvailable sets `minAvailable` to the inner PodDisruptionBudget.Spec.MinAvailable.
func (p *PodDisruptionBudgetWrapper) MinAvailable(minAvailable string) *PodDisruptionBudgetWrapper {
p.Spec.MinAvailable = &intstr.IntOrString{
Type: intstr.String,
StrVal: minAvailable,
}
return p
}

// MatchLabel adds a {key,value} to the inner PodDisruptionBudget.Spec.Selector.MatchLabels.
func (p *PodDisruptionBudgetWrapper) MatchLabel(key, value string) *PodDisruptionBudgetWrapper {
selector := p.Spec.Selector
if selector == nil {
selector = &metav1.LabelSelector{}
}
matchLabels := selector.MatchLabels
if matchLabels == nil {
matchLabels = map[string]string{}
}
matchLabels[key] = value
selector.MatchLabels = matchLabels
p.Spec.Selector = selector
return p
}

// DisruptionsAllowed sets `disruptionsAllowed` to the inner PodDisruptionBudget.Status.DisruptionsAllowed.
func (p *PodDisruptionBudgetWrapper) DisruptionsAllowed(disruptionsAllowed int32) *PodDisruptionBudgetWrapper {
p.Status.DisruptionsAllowed = disruptionsAllowed
return p
}

// PodWrapper wraps a Pod inside.
type PodWrapper struct{ v1.Pod }

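For context — an illustrative sketch, not part of this diff — the new wrapper is meant to be chained the same way the PDB fixtures are built in the updated TestPostFilter case above, where st aliases this scheduler testing package and the label constants come from the preemption test file:

	violatingPDB := st.MakePDB().Name("violating-pdb").Namespace(v1.NamespaceDefault).
		MatchLabel(LabelKeyIsViolatingPDB, LabelValueViolatingPDB).
		MinAvailable("100%").Obj()
	nonViolatingPDB := st.MakePDB().Name("non-violating-pdb").Namespace(v1.NamespaceDefault).
		MatchLabel(LabelKeyIsViolatingPDB, LabelValueNonViolatingPDB).
		MinAvailable("0").DisruptionsAllowed(math.MaxInt32).Obj()
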
2 changes: 1 addition & 1 deletion staging/src/k8s.io/apiserver/pkg/server/config.go
@@ -1234,7 +1234,7 @@ func AuthorizeClientBearerToken(loopback *restclient.Config, authn *Authenticati
tokens[privilegedLoopbackToken] = &user.DefaultInfo{
Name: user.APIServerUser,
UID: uid,
Groups: []string{user.SystemPrivilegedGroup},
Groups: []string{user.AllAuthenticated, user.SystemPrivilegedGroup},
}

tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens, authn.APIAudiences)
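As an illustration of what the added group enables — a hedged sketch, not part of this diff — an authorizer keyed on user.AllAuthenticated ("system:authenticated") now also admits the privileged loopback user. The AuthorizerFunc signature matches the one used in the test that follows; assumed imports are context, k8s.io/apiserver/pkg/authentication/user, and k8s.io/apiserver/pkg/authorization/authorizer:

	// Illustrative only: allow any request whose user carries the
	// "system:authenticated" group, which the loopback user now does in
	// addition to user.SystemPrivilegedGroup.
	allowAuthenticated := authorizer.AuthorizerFunc(func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
		if u := a.GetUser(); u != nil {
			for _, g := range u.GetGroups() {
				if g == user.AllAuthenticated {
					return authorizer.DecisionAllow, "", nil
				}
			}
		}
		return authorizer.DecisionNoOpinion, "", nil
	})
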
29 changes: 29 additions & 0 deletions staging/src/k8s.io/apiserver/pkg/server/config_test.go
@@ -38,6 +38,7 @@ import (
"k8s.io/apiserver/pkg/audit/policy"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/server/healthz"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -83,6 +84,34 @@ func TestAuthorizeClientBearerTokenNoops(t *testing.T) {
}
}

func TestAuthorizeClientBearerTokenRequiredGroups(t *testing.T) {
fakeAuthenticator := authenticator.RequestFunc(func(req *http.Request) (*authenticator.Response, bool, error) {
return &authenticator.Response{User: &user.DefaultInfo{}}, false, nil
})
fakeAuthorizer := authorizer.AuthorizerFunc(func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
return authorizer.DecisionAllow, "", nil
})
target := &rest.Config{BearerToken: "secretToken"}
authN := &AuthenticationInfo{Authenticator: fakeAuthenticator}
authC := &AuthorizationInfo{Authorizer: fakeAuthorizer}

AuthorizeClientBearerToken(target, authN, authC)

fakeRequest, err := http.NewRequest("", "", nil)
if err != nil {
t.Fatal(err)
}
fakeRequest.Header.Set("Authorization", "bearer secretToken")
rsp, _, err := authN.Authenticator.AuthenticateRequest(fakeRequest)
if err != nil {
t.Fatal(err)
}
expectedGroups := []string{user.AllAuthenticated, user.SystemPrivilegedGroup}
if !reflect.DeepEqual(expectedGroups, rsp.User.GetGroups()) {
t.Fatalf("unexpected groups = %v returned, expected = %v", rsp.User.GetGroups(), expectedGroups)
}
}

func TestNewWithDelegate(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancelCause(ctx)
@@ -904,6 +904,7 @@ func TestCacherDontMissEventsOnReinitialization(t *testing.T) {
case 1:
podList.ListMeta = metav1.ListMeta{ResourceVersion: "10"}
default:
t.Errorf("unexpected list call: %d", listCalls)
err = fmt.Errorf("unexpected list call")
}
listCalls++
@@ -926,8 +927,11 @@
for i := 12; i < 18; i++ {
w.Add(makePod(i))
}
w.Stop()
// Keep the watch open to avoid another reinitialization,
// but register it for cleanup.
t.Cleanup(func() { w.Stop() })
default:
t.Errorf("unexpected watch call: %d", watchCalls)
err = fmt.Errorf("unexpected watch call")
}
watchCalls++
@@ -949,7 +953,6 @@
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()

errCh := make(chan error, concurrency)
for i := 0; i < concurrency; i++ {
go func() {
defer wg.Done()
@@ -973,11 +976,11 @@
}
rv, err := strconv.Atoi(object.(*example.Pod).ResourceVersion)
if err != nil {
errCh <- fmt.Errorf("incorrect resource version: %v", err)
t.Errorf("incorrect resource version: %v", err)
return
}
if prevRV != -1 && prevRV+1 != rv {
errCh <- fmt.Errorf("unexpected event received, prevRV=%d, rv=%d", prevRV, rv)
t.Errorf("unexpected event received, prevRV=%d, rv=%d", prevRV, rv)
return
}
prevRV = rv
Expand All @@ -986,11 +989,6 @@ func TestCacherDontMissEventsOnReinitialization(t *testing.T) {
}()
}
wg.Wait()
close(errCh)

for err := range errCh {
t.Error(err)
}
}

func TestCacherNoLeakWithMultipleWatchers(t *testing.T) {
@@ -110,6 +110,7 @@ func New(
featureGate featuregate.FeatureGate,
) (*Controller, error) {
registerMetrics()

s := &Controller{
cloud: cloud,
kubeClient: kubeClient,
@@ -128,6 +129,10 @@
lastSyncedNodes: make(map[string][]*v1.Node),
}

if err := s.init(); err != nil {
return nil, err
}

serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: func(cur interface{}) {
@@ -182,10 +187,6 @@
nodeSyncPeriod,
)

if err := s.init(); err != nil {
return nil, err
}

return s, nil
}
