This repository has been archived by the owner on Apr 25, 2023. It is now read-only.

Webhook checking for KubeFedConfig #941

Merged: 5 commits, Jun 7, 2019
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -1,4 +1,6 @@
# Unreleased
- [#941](https://github.com/kubernetes-sigs/kubefed/issues/941) Adds
admission webhook validations for KubeFedConfig API.
- [#909](https://github.com/kubernetes-sigs/kubefed/issues/909) Adds
admission webhook validations for FederatedTypeConfig API.

14 changes: 14 additions & 0 deletions charts/kubefed/charts/controllermanager/templates/clusterrole.yaml
@@ -65,3 +65,17 @@ rules:
- update
- patch
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:kubefed:admission-requester
rules:
- apiGroups:
- admission.core.kubefed.k8s.io
resources:
- federatedtypeconfigs
- kubefedclusters
- kubefedconfigs
verbs:
- create
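Note: this system:kubefed:admission-requester ClusterRole moves here from roles.yaml (its removal appears in that file's diff below) and now lists kubefedconfigs alongside federatedtypeconfigs and kubefedclusters, matching the new webhook.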
@@ -13,3 +13,29 @@ subjects:
name: kubefed-controller
namespace: {{ .Release.Namespace }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: auth-delegator-kubefed-admission-webhook
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: kubefed-admission-webhook
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: anonymous-kubefed-admission-webhook-auth
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: system:kubefed:admission-requester
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:anonymous
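Note: the two ClusterRoleBindings above are carried over unchanged from the deletion hunks that follow; only their location in the chart changes.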
@@ -43,19 +43,6 @@ subjects:
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: auth-delegator-kubefed-admission-webhook
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: kubefed-admission-webhook
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubefed-admission-webhook-apiextension-viewer
@@ -68,16 +55,3 @@ subjects:
- kind: ServiceAccount
name: kubefed-admission-webhook
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: anonymous-kubefed-admission-webhook-auth
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: system:kubefed:admission-requester
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:anonymous
13 changes: 0 additions & 13 deletions charts/kubefed/charts/controllermanager/templates/roles.yaml
@@ -99,16 +99,3 @@ rules:
- get
- watch
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:kubefed:admission-requester
rules:
- apiGroups:
- admission.core.kubefed.k8s.io
resources:
- federatedtypeconfigs
- kubefedclusters
verbs:
- create
24 changes: 24 additions & 0 deletions charts/kubefed/charts/controllermanager/templates/webhook.yaml
@@ -52,6 +52,30 @@ webhooks:
- "kubefedclusters"
failurePolicy: Fail
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: "kubefedconfigs.core.kubefed.k8s.io"
webhooks:
- name: kubefedconfigs.core.kubefed.k8s.io
clientConfig:
service:
namespace: {{ .Release.Namespace | quote }}
name: kubefed-admission-webhook
path: /apis/admission.core.kubefed.k8s.io/v1beta1/kubefedconfigs
caBundle: {{ b64enc $ca.Cert | quote }}
rules:
- operations:
- "CREATE"
- "UPDATE"
apiGroups:
- "core.kubefed.k8s.io"
apiVersions:
- "v1beta1"
resources:
- "kubefedconfigs"
failurePolicy: Fail
---
apiVersion: v1
kind: Secret
metadata:
4 changes: 2 additions & 2 deletions cmd/controller-manager/app/controller-manager.go
@@ -237,7 +237,7 @@ func setInt64(target *int64, defaultValue int64) {
}
}

func setDefaultKubeFedConfig(fedConfig *corev1b1.KubeFedConfig) {
func SetDefaultKubeFedConfig(fedConfig *corev1b1.KubeFedConfig) {
spec := &fedConfig.Spec

if len(spec.Scope) == 0 {
@@ -325,7 +325,7 @@ func setOptionsByKubeFedConfig(opts *options.Options) {
}
}

setDefaultKubeFedConfig(fedConfig)
SetDefaultKubeFedConfig(fedConfig)

spec := fedConfig.Spec
opts.Scope = spec.Scope
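Note: setDefaultKubeFedConfig is exported as SetDefaultKubeFedConfig so that the new validation tests (validation_test.go below) can build a fully defaulted KubeFedConfig via app.SetDefaultKubeFedConfig.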
1 change: 1 addition & 0 deletions config/kubefedconfig.yaml
@@ -4,6 +4,7 @@ metadata:
name: kubefed
namespace: kube-federation-system
spec:
scope: Cluster
leaderElect:
leaseDuration: 1500ms
renewDeadline: 1000ms
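Note: an explicit scope: Cluster is added so the sample KubeFedConfig passes the new spec.scope validation, which accepts only "Cluster" or "Namespaced" (an empty value is rejected at admission).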
77 changes: 77 additions & 0 deletions pkg/apis/core/v1beta1/validation/validation.go
@@ -18,14 +18,18 @@ package validation

import (
"strings"
"time"

apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apimachineryval "k8s.io/apimachinery/pkg/api/validation"
valutil "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/klog"

"sigs.k8s.io/kubefed/pkg/apis/core/typeconfig"
"sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
"sigs.k8s.io/kubefed/pkg/features"
)

func ValidateFederatedTypeConfig(obj *v1beta1.FederatedTypeConfig, statusSubResource bool) field.ErrorList {
@@ -139,6 +143,79 @@ func ValidateFederatedTypeConfigStatus(status *v1beta1.FederatedTypeConfigStatus
return allErrs
}

func ValidateKubeFedConfig(kubeFedConfig *v1beta1.KubeFedConfig) field.ErrorList {
klog.V(2).Infof("Validating KubeFedConfig %q", kubeFedConfig.Name)

allErrs := field.ErrorList{}

spec := kubeFedConfig.Spec
specPath := field.NewPath("spec")
allErrs = append(allErrs, validateEnumStrings(specPath.Child("scope"), string(spec.Scope),
[]string{string(apiextv1b1.ClusterScoped), string(apiextv1b1.NamespaceScoped)})...)

duration := spec.ControllerDuration
durationPath := specPath.Child("controllerDuration")
allErrs = append(allErrs, validateGreaterThan0(durationPath.Child("availableDelay"), int64(duration.AvailableDelay.Duration))...)
allErrs = append(allErrs, validateGreaterThan0(durationPath.Child("unavailableDelay"), int64(duration.UnavailableDelay.Duration))...)

elect := spec.LeaderElect
electPath := specPath.Child("leaderElect")
allErrs = append(allErrs, validateGreaterThan0(electPath.Child("leaseDuration"), int64(elect.LeaseDuration.Duration))...)
allErrs = append(allErrs, validateGreaterThan0(electPath.Child("renewDeadline"), int64(elect.RenewDeadline.Duration))...)
allErrs = append(allErrs, validateGreaterThan0(electPath.Child("retryPeriod"), int64(elect.RetryPeriod.Duration))...)
if elect.LeaseDuration.Duration <= elect.RenewDeadline.Duration {
allErrs = append(allErrs, field.Invalid(electPath.Child("leaseDuration"), elect.LeaseDuration,
"leaseDuration must be greater than renewDeadline"))
}
if elect.RenewDeadline.Duration <= time.Duration(float64(elect.RetryPeriod.Duration)*leaderelection.JitterFactor) {
allErrs = append(allErrs, field.Invalid(electPath.Child("renewDeadline"), elect.RenewDeadline,
"renewDeadline must be greater than retryPeriod*JitterFactor"))
[Review thread]
Contributor: Please include the jitter factor in the message. Also, is there a reason the jitter factor isn't configurable? cc: @shashidharatd
Contributor: JitterFactor seems to be an agreed-upon constant (https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go#L72) and is not exposed by the leader election library. Let's keep it like that for now.
Contributor: OK, so it should remain non-configurable. Is there a reason it should not be exposed in the error message, though? The value should either be explicitly mentioned in the error message or the mention of JitterFactor removed.
(A worked example of this check follows this file's diff.)
}
allErrs = append(allErrs, validateEnumStrings(electPath.Child("resourceLock"), string(elect.ResourceLock),
[]string{string(v1beta1.ConfigMapsResourceLock), string(v1beta1.EndpointsResourceLock)})...)

gates := spec.FeatureGates
gatesPath := specPath.Child("featureGates")
existingNames := make(map[string]bool)
for _, gate := range gates {
[Review comment]
Contributor: Please ensure that a gate appears at most once.

_, ok := existingNames[gate.Name]
if ok {
allErrs = append(allErrs, field.Duplicate(gatesPath.Child("name"), gate.Name))
continue
}
existingNames[gate.Name] = true

allErrs = append(allErrs, validateEnumStrings(gatesPath.Child("name"), string(gate.Name),
[]string{string(features.PushReconciler), string(features.SchedulerPreferences),
string(features.CrossClusterServiceDiscovery), string(features.FederatedIngress)})...)

allErrs = append(allErrs, validateEnumStrings(gatesPath.Child("configuration"), string(gate.Configuration),
[]string{string(v1beta1.ConfigurationEnabled), string(v1beta1.ConfigurationDisabled)})...)
}

health := spec.ClusterHealthCheck
healthPath := specPath.Child("clusterHealthCheck")
allErrs = append(allErrs, validateGreaterThan0(healthPath.Child("periodSeconds"), health.PeriodSeconds)...)
allErrs = append(allErrs, validateGreaterThan0(healthPath.Child("failureThreshold"), health.FailureThreshold)...)
allErrs = append(allErrs, validateGreaterThan0(healthPath.Child("successThreshold"), health.SuccessThreshold)...)
allErrs = append(allErrs, validateGreaterThan0(healthPath.Child("timeoutSeconds"), health.TimeoutSeconds)...)

sync := spec.SyncController
syncPath := specPath.Child("syncController")
allErrs = append(allErrs, validateEnumStrings(syncPath.Child("adoptResources"), string(sync.AdoptResources),
[]string{string(v1beta1.AdoptResourcesEnabled), string(v1beta1.AdoptResourcesDisabled)})...)

return allErrs
}

func validateGreaterThan0(path *field.Path, value int64) field.ErrorList {
errs := field.ErrorList{}
if value <= 0 {
errs = append(errs, field.Invalid(path, value, "should be greater than 0"))
}
return errs
}

func ValidateKubeFedCluster(object *v1beta1.KubeFedCluster) field.ErrorList {
allErrs := field.ErrorList{}
return allErrs
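To make the JitterFactor discussion above concrete, here is a minimal sketch (not part of this diff) of the renewDeadline check, using the same values as the error case in TestValidateKubeFedConfig below; JitterFactor is the exported client-go constant 1.2.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/tools/leaderelection"
)

func main() {
	// Values from the renewDeadline error case in TestValidateKubeFedConfig.
	renewDeadline := 12 * time.Nanosecond
	retryPeriod := 10 * time.Nanosecond

	// The validation requires renewDeadline > retryPeriod * JitterFactor.
	// With JitterFactor = 1.2: 10 * 1.2 = 12, and 12 > 12 is false,
	// so this combination yields a field.Invalid error.
	limit := time.Duration(float64(retryPeriod) * leaderelection.JitterFactor)
	fmt.Println(renewDeadline > limit) // false
}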
119 changes: 119 additions & 0 deletions pkg/apis/core/v1beta1/validation/validation_test.go
@@ -21,11 +21,15 @@ import (
"strings"
"testing"

apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"

"sigs.k8s.io/kubefed/cmd/controller-manager/app"
"sigs.k8s.io/kubefed/pkg/apis/core/typeconfig"
"sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
"sigs.k8s.io/kubefed/pkg/controller/util"
"sigs.k8s.io/kubefed/pkg/features"
"sigs.k8s.io/kubefed/pkg/kubefedctl/enable"
"sigs.k8s.io/kubefed/pkg/kubefedctl/options"
)
@@ -288,3 +292,118 @@ func federatedTypeConfig(apiResource *metav1.APIResource) *v1beta1.FederatedType
}
return ftc
}

func TestValidateKubeFedConfig(t *testing.T) {
errs := ValidateKubeFedConfig(validKubeFedConfig())
if len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}

errorCases := map[string]*v1beta1.KubeFedConfig{}

validScope := validKubeFedConfig()
[Review comment, resolved]
Contributor: This is an invalid scope.
validScope.Spec.Scope = "NeitherClusterOrNamespaceScoped"
errorCases["spec.scope: Unsupported value"] = validScope

validAvailableDelayGreaterThan0 := validKubeFedConfig()
validAvailableDelayGreaterThan0.Spec.ControllerDuration.AvailableDelay.Duration = 0
errorCases["spec.controllerDuration.availableDelay: Invalid value"] = validAvailableDelayGreaterThan0

validUnavailableDelayGreaterThan0 := validKubeFedConfig()
validUnavailableDelayGreaterThan0.Spec.ControllerDuration.UnavailableDelay.Duration = 0
errorCases["spec.controllerDuration.unavailableDelay: Invalid value"] = validUnavailableDelayGreaterThan0

validLeaseDurationGreaterThan0 := validKubeFedConfig()
validLeaseDurationGreaterThan0.Spec.LeaderElect.LeaseDuration.Duration = 0
errorCases["spec.leaderElect.leaseDuration: Invalid value"] = validLeaseDurationGreaterThan0

validRenewDeadlineGreaterThan0 := validKubeFedConfig()
validRenewDeadlineGreaterThan0.Spec.LeaderElect.RenewDeadline.Duration = 0
errorCases["spec.leaderElect.renewDeadline: Invalid value"] = validRenewDeadlineGreaterThan0

// spec.leaderElect.leaseDuration must be greater than renewDeadline
validElectorLeaseDurationGreater := validKubeFedConfig()
validElectorLeaseDurationGreater.Spec.LeaderElect.LeaseDuration.Duration = 1
validElectorLeaseDurationGreater.Spec.LeaderElect.RenewDeadline.Duration = 2
errorCases["spec.leaderElect.leaseDuration: Invalid value"] = validElectorLeaseDurationGreater

validRetryPeriodGreaterThan0 := validKubeFedConfig()
validRetryPeriodGreaterThan0.Spec.LeaderElect.RetryPeriod.Duration = 0
errorCases["spec.leaderElect.retryPeriod: Invalid value"] = validRetryPeriodGreaterThan0

// spec.leaderElect.renewDeadline must be greater than retryPeriod*JitterFactor(1.2)
validElectorDuration := validKubeFedConfig()
validElectorDuration.Spec.LeaderElect.RenewDeadline.Duration = 12
validElectorDuration.Spec.LeaderElect.RetryPeriod.Duration = 10
errorCases["spec.leaderElect.renewDeadline: Invalid value"] = validElectorDuration

validElectorResourceLock := validKubeFedConfig()
validElectorResourceLock.Spec.LeaderElect.ResourceLock = "NeitherConfigmapsOrEndpoints"
errorCases["spec.leaderElect.resourceLock: Unsupported value"] = validElectorResourceLock

validFeatureGateName := validKubeFedConfig()
[Review comment]
Contributor: This is a bad feature gate name.

validFeatureGateName.Spec.FeatureGates[0].Name = "BadFeatureName"
errorCases["spec.featureGates.name: Unsupported value"] = validFeatureGateName

validDupFeatureGates := validKubeFedConfig()
dupFeature := v1beta1.FeatureGatesConfig{
Name: string(features.PushReconciler),
Configuration: v1beta1.ConfigurationEnabled,
}
validDupFeatureGates.Spec.FeatureGates = append(validDupFeatureGates.Spec.FeatureGates, dupFeature)
errorCases["spec.featureGates.name: Duplicate value"] = validDupFeatureGates

validFeatureGateConf := validKubeFedConfig()
validFeatureGateConf.Spec.FeatureGates[0].Configuration = "NeitherEnableOrDisable"
errorCases["spec.featureGates.configuration: Unsupported value"] = validFeatureGateConf

validPeriodSecondsGreaterThan0 := validKubeFedConfig()
validPeriodSecondsGreaterThan0.Spec.ClusterHealthCheck.PeriodSeconds = 0
errorCases["spec.clusterHealthCheck.periodSeconds: Invalid value"] = validPeriodSecondsGreaterThan0

validFailureThresholdGreaterThan0 := validKubeFedConfig()
validFailureThresholdGreaterThan0.Spec.ClusterHealthCheck.FailureThreshold = 0
errorCases["spec.clusterHealthCheck.failureThreshold: Invalid value"] = validFailureThresholdGreaterThan0

validSuccessThresholdGreaterThan0 := validKubeFedConfig()
validSuccessThresholdGreaterThan0.Spec.ClusterHealthCheck.SuccessThreshold = 0
errorCases["spec.clusterHealthCheck.successThreshold: Invalid value"] = validSuccessThresholdGreaterThan0

validTimeoutSecondsGreaterThan0 := validKubeFedConfig()
validTimeoutSecondsGreaterThan0.Spec.ClusterHealthCheck.TimeoutSeconds = 0
errorCases["spec.clusterHealthCheck.timeoutSeconds: Invalid value"] = validTimeoutSecondsGreaterThan0

validAdoptResources := validKubeFedConfig()
validAdoptResources.Spec.SyncController.AdoptResources = "NeitherEnableOrDisable"
errorCases["spec.syncController.adoptResources: Unsupported value"] = validAdoptResources

for k, v := range errorCases {
errs := ValidateKubeFedConfig(v)
if len(errs) == 0 {
t.Errorf("[%s] expected failure", k)
} else if !strings.Contains(errs[0].Error(), k) {
t.Errorf("unexpected error: %q, expected: %q", errs[0].Error(), k)
}
}
}

func validKubeFedConfig() *v1beta1.KubeFedConfig {
kfc := &v1beta1.KubeFedConfig{
ObjectMeta: metav1.ObjectMeta{
Namespace: util.DefaultKubeFedSystemNamespace,
Name: util.KubeFedConfigName,
},
Spec: v1beta1.KubeFedConfigSpec{
Scope: apiextv1b1.ClusterScoped,
},
}

app.SetDefaultKubeFedConfig(kfc)

for _, name := range features.FeatureNames {
feature := v1beta1.FeatureGatesConfig{Name: string(name), Configuration: v1beta1.ConfigurationEnabled}
kfc.Spec.FeatureGates = append(kfc.Spec.FeatureGates, feature)
}

return kfc
}
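A minimal usage sketch (assumed, not part of this diff) of the new entry point: this is the same check that the ValidatingWebhookConfiguration above routes KubeFedConfig admission requests to. The "Regional" scope here is a hypothetical invalid value.

package main

import (
	"fmt"

	"sigs.k8s.io/kubefed/cmd/controller-manager/app"
	"sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
	"sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/validation"
)

func main() {
	cfg := &v1beta1.KubeFedConfig{}
	cfg.Spec.Scope = "Regional"      // hypothetical value: neither "Cluster" nor "Namespaced"
	app.SetDefaultKubeFedConfig(cfg) // fill the remaining fields, as the test helper does

	// ValidateKubeFedConfig returns a field.ErrorList; the webhook denies the
	// request when the list is non-empty.
	for _, err := range validation.ValidateKubeFedConfig(cfg) {
		fmt.Println(err.Error()) // e.g. spec.scope: Unsupported value: "Regional"
	}
}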