diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 23748f10b..3ecaf95ea 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -64,13 +64,16 @@ var ( cmpopts.IgnoreFields(v1alpha1.AgentStatus{}, "LastReceivedHeartbeat"), sortOption, } - mcStatusCmpOptions = []cmp.Option{ cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "ObservedGeneration"), cmpopts.IgnoreFields(v1alpha1.AgentStatus{}, "LastReceivedHeartbeat"), cmpopts.IgnoreFields(v1alpha1.ResourceUsage{}, "ObservationTime"), sortOption, } + crpStatusCmpOptions = []cmp.Option{ + cmpopts.IgnoreFields(metav1.Condition{}, "ObservedGeneration", "LastTransitionTime", "Message"), + sortOption, + } imcJoinedAgentStatus = []v1alpha1.AgentStatus{ { @@ -165,8 +168,6 @@ var _ = BeforeSuite(func() { MemberCluster.HubURL = hubURL framework.GetClusterClient(MemberCluster) - testutils.CreateNamespace(*HubCluster, workNamespace) - ctx = context.Background() By("deploy member cluster in the hub cluster") @@ -200,31 +201,16 @@ var _ = BeforeSuite(func() { By("check if internal member cluster status is updated to Joined") wantIMCStatus := v1alpha1.InternalMemberClusterStatus{AgentStatus: imcJoinedAgentStatus} - Eventually(func() error { - if err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: imc.Name, Namespace: imc.Namespace}, imc); err != nil { - return err - } - if statusDiff := cmp.Diff(wantIMCStatus, imc.Status, imcStatusCmpOptions...); statusDiff != "" { - return fmt.Errorf("internal member cluster(%s) status mismatch (-want +got):\n%s", imc.Name, statusDiff) - } - return nil - }, 3*testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to wait for internal member cluster %s to have status %s", imc.Name, wantIMCStatus) + testutils.CheckInternalMemberClusterStatus(ctx, *HubCluster, &types.NamespacedName{Name: imc.Name, Namespace: imc.Namespace}, wantIMCStatus, imcStatusCmpOptions) By("check if member cluster status is updated to Joined") + Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: imc.Name, Namespace: imc.Namespace}, imc)).Should(Succeed(), "Failed to retrieve internal member cluster %s in %s cluster", imc.Name, HubCluster.ClusterName) wantMCStatus := v1alpha1.MemberClusterStatus{ AgentStatus: imc.Status.AgentStatus, Conditions: mcJoinedConditions, ResourceUsage: imc.Status.ResourceUsage, } - Eventually(func() error { - if err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: mc.Name}, mc); err != nil { - return err - } - if statusDiff := cmp.Diff(wantMCStatus, mc.Status, mcStatusCmpOptions...); statusDiff != "" { - return fmt.Errorf("member cluster(%s) status mismatch (-want +got):\n%s", mc.Name, statusDiff) - } - return nil - }, 3*testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to wait for internal member cluster %s to have status %s", mc.Name, wantMCStatus) + testutils.CheckMemberClusterStatus(ctx, *HubCluster, &types.NamespacedName{Name: mc.Name}, wantMCStatus, mcStatusCmpOptions) }) var _ = AfterSuite(func() { @@ -235,34 +221,17 @@ var _ = AfterSuite(func() { By("check if internal member cluster status is updated to Left") wantIMCStatus := v1alpha1.InternalMemberClusterStatus{AgentStatus: imcLeftAgentStatus} - Eventually(func() error { - if err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: imc.Name, Namespace: imc.Namespace}, imc); err != nil { - return err - } - if statusDiff := cmp.Diff(wantIMCStatus, imc.Status, imcStatusCmpOptions...); statusDiff != "" { - return fmt.Errorf("internal member cluster(%s) 
status mismatch (-want +got):\n%s", imc.Name, statusDiff) - } - return nil - }, 3*testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to wait for internal member cluster %s to have status %s", imc.Name, wantIMCStatus) + testutils.CheckInternalMemberClusterStatus(ctx, *HubCluster, &types.NamespacedName{Name: imc.Name, Namespace: imc.Namespace}, wantIMCStatus, imcStatusCmpOptions) By("check if member cluster status is updated to Left") + Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: imc.Name, Namespace: imc.Namespace}, imc)).Should(Succeed(), "Failed to retrieve internal member cluster %s in %s cluster", imc.Name, HubCluster.ClusterName) wantMCStatus := v1alpha1.MemberClusterStatus{ AgentStatus: imc.Status.AgentStatus, Conditions: mcLeftConditions, ResourceUsage: imc.Status.ResourceUsage, } - Eventually(func() error { - if err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: mc.Name}, mc); err != nil { - return err - } - if statusDiff := cmp.Diff(wantMCStatus, mc.Status, mcStatusCmpOptions...); statusDiff != "" { - return fmt.Errorf("member cluster(%s) status mismatch (-want +got):\n%s", mc.Name, statusDiff) - } - return nil - }, 3*testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to wait for internal member cluster %s to have status %s", mc.Name, wantMCStatus) + testutils.CheckMemberClusterStatus(ctx, *HubCluster, &types.NamespacedName{Name: mc.Name}, wantMCStatus, mcStatusCmpOptions) - testutils.DeleteNamespace(*MemberCluster, memberNamespace) - testutils.DeleteNamespace(*HubCluster, workNamespace) By("delete member cluster") testutils.DeleteMemberCluster(ctx, *HubCluster, mc) }) diff --git a/test/e2e/join_leave_placement_test.go b/test/e2e/join_leave_placement_test.go index 875cefaa0..ab12a99a1 100644 --- a/test/e2e/join_leave_placement_test.go +++ b/test/e2e/join_leave_placement_test.go @@ -6,7 +6,6 @@ package e2e import ( "context" - "fmt" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -18,13 +17,12 @@ import ( "k8s.io/apimachinery/pkg/types" "go.goms.io/fleet/apis/v1alpha1" - testutils "go.goms.io/fleet/test/e2e/utils" + "go.goms.io/fleet/test/e2e/utils" ) // Serial - Ginkgo will guarantee that these specs will never run in parallel with other specs. -// Ordered - Ginkgo will guarantee that specs in an Ordered container will run sequentially, in the order they are written. // This test cannot be run in parallel with other specs in the suite as it's leaving, joining, leaving and joining again. 
-var _ = Describe("workload orchestration testing with join/leave", Serial, Ordered, func() { +var _ = Describe("workload orchestration testing with join/leave", Serial, func() { var ( crp *v1alpha1.ClusterResourcePlacement ctx context.Context @@ -32,7 +30,8 @@ var _ = Describe("workload orchestration testing with join/leave", Serial, Order mcStatusCmpOptions = []cmp.Option{ cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "ObservedGeneration"), cmpopts.IgnoreFields(v1alpha1.AgentStatus{}, "LastReceivedHeartbeat"), - cmpopts.IgnoreTypes(v1alpha1.ResourceUsage{}), cmpopts.SortSlices(func(ref1, ref2 metav1.Condition) bool { return ref1.Type < ref2.Type }), + cmpopts.IgnoreTypes(v1alpha1.ResourceUsage{}), + sortOption, } ) @@ -52,15 +51,7 @@ var _ = Describe("workload orchestration testing with join/leave", Serial, Order AgentStatus: imcLeftAgentStatus, Conditions: mcLeftConditions, } - Eventually(func() error { - if err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: mc.Name}, mc); err != nil { - return err - } - if statusDiff := cmp.Diff(wantMCStatus, mc.Status, mcStatusCmpOptions...); statusDiff != "" { - return fmt.Errorf("member cluster(%s) status mismatch (-want +got):\n%s", mc.Name, statusDiff) - } - return nil - }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to wait for member cluster %s to have status %s", mc.Name, wantMCStatus) + utils.CheckMemberClusterStatus(ctx, *HubCluster, &types.NamespacedName{Name: mc.Name}, wantMCStatus, mcStatusCmpOptions) By("create the resources to be propagated") cr := &rbacv1.ClusterRole{ @@ -76,7 +67,7 @@ var _ = Describe("workload orchestration testing with join/leave", Serial, Order }, }, } - testutils.CreateClusterRole(*HubCluster, cr) + Expect(HubCluster.KubeClient.Create(ctx, cr)).Should(Succeed(), "Failed to create cluster role %s in %s cluster", cr.Name, HubCluster.ClusterName) By("create the cluster resource placement in the hub cluster") crp = &v1alpha1.ClusterResourcePlacement{ @@ -96,12 +87,12 @@ var _ = Describe("workload orchestration testing with join/leave", Serial, Order }, }, } - testutils.CreateClusterResourcePlacement(*HubCluster, crp) + Expect(HubCluster.KubeClient.Create(ctx, crp)).Should(Succeed(), "Failed to create cluster resource placement %s in %s cluster", crp.Name, HubCluster.ClusterName) By("verify the resource is not propagated to member cluster") Consistently(func() bool { return apierrors.IsNotFound(MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: cr.Name}, cr)) - }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue(), "Failed to verify cluster role %s is not propagated to %s cluster", cr.Name, MemberCluster.ClusterName) + }, utils.PollTimeout, utils.PollInterval).Should(BeTrue(), "Failed to verify cluster role %s is not propagated to %s cluster", cr.Name, MemberCluster.ClusterName) By("update member cluster in the hub cluster to join") Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: mc.Name}, mc)).Should(Succeed(), "Failed to retrieve member cluster %s in %s cluster", mc.Name, HubCluster.ClusterName) @@ -113,18 +104,33 @@ var _ = Describe("workload orchestration testing with join/leave", Serial, Order AgentStatus: imcJoinedAgentStatus, Conditions: mcJoinedConditions, } - Eventually(func() error { - if err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: mc.Name}, mc); err != nil { - return err - } - if statusDiff := cmp.Diff(wantMCStatus, mc.Status, mcStatusCmpOptions...); statusDiff != "" { - return 
fmt.Errorf("member cluster(%s) status mismatch (-want +got):\n%s", mc.Name, statusDiff) - } - return nil - }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to wait for member cluster %s to have status %s", mc.Name, wantMCStatus) + utils.CheckMemberClusterStatus(ctx, *HubCluster, &types.NamespacedName{Name: mc.Name}, wantMCStatus, mcStatusCmpOptions) By("verify that the cluster resource placement is applied") - testutils.WaitConditionClusterResourcePlacement(*HubCluster, crp, string(v1alpha1.ResourcePlacementStatusConditionTypeApplied), metav1.ConditionTrue, testutils.PollTimeout) + crpStatus := v1alpha1.ClusterResourcePlacementStatus{ + Conditions: []metav1.Condition{ + { + Reason: "ScheduleSucceeded", + Status: metav1.ConditionTrue, + Type: string(v1alpha1.ResourcePlacementConditionTypeScheduled), + }, + { + Reason: "ApplySucceeded", + Status: metav1.ConditionTrue, + Type: string(v1alpha1.ResourcePlacementStatusConditionTypeApplied), + }, + }, + SelectedResources: []v1alpha1.ResourceIdentifier{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Name: cr.Name, + }, + }, + TargetClusters: []string{"kind-member-testing"}, + } + utils.WaitCreateClusterResourcePlacementStatus(ctx, *HubCluster, &types.NamespacedName{Name: crp.Name}, crpStatus, crpStatusCmpOptions, 3*utils.PollTimeout) By("verify the resource is propagated to member cluster") Expect(MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: cr.Name}, cr)).Should(Succeed(), "Failed to verify cluster role %s is propagated to %s cluster", cr.Name, MemberCluster.ClusterName) @@ -139,31 +145,26 @@ var _ = Describe("workload orchestration testing with join/leave", Serial, Order AgentStatus: imcLeftAgentStatus, Conditions: mcLeftConditions, } - Eventually(func() error { - if err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: mc.Name}, mc); err != nil { - return err - } - if statusDiff := cmp.Diff(wantMCStatus, mc.Status, mcStatusCmpOptions...); statusDiff != "" { - return fmt.Errorf("member cluster(%s) status mismatch (-want +got):\n%s", mc.Name, statusDiff) - } - return nil - }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to wait for internal member cluster %s to have status %s", mc.Name, wantMCStatus) + utils.CheckMemberClusterStatus(ctx, *HubCluster, &types.NamespacedName{Name: mc.Name}, wantMCStatus, mcStatusCmpOptions) By("verify that the resource is still on the member cluster") Consistently(func() error { return MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: cr.Name}, cr) - }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to verify cluster role %s is still on %s cluster", cr.Name, MemberCluster.ClusterName) + }, utils.PollTimeout, utils.PollInterval).Should(Succeed(), "Failed to verify cluster role %s is still on %s cluster", cr.Name, MemberCluster.ClusterName) By("delete the crp from the hub") - testutils.DeleteClusterResourcePlacement(*HubCluster, crp) + utils.DeleteClusterResourcePlacement(ctx, *HubCluster, crp) By("verify that the resource is still on the member cluster") Consistently(func() error { return MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: cr.Name, Namespace: ""}, cr) - }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to verify cluster role %s is still on %s cluster", cr.Name, MemberCluster.ClusterName) + }, utils.PollTimeout, utils.PollInterval).Should(Succeed(), "Failed to verify cluster role %s is still on %s cluster", 
cr.Name, MemberCluster.ClusterName) By("delete cluster role on hub cluster") - testutils.DeleteClusterRole(*HubCluster, cr) + Expect(HubCluster.KubeClient.Delete(ctx, cr)).Should(Succeed(), "Failed to delete cluster role %s in %s cluster", cr.Name, HubCluster.ClusterName) + + By("delete cluster role on member cluster") + Expect(MemberCluster.KubeClient.Delete(ctx, cr)).Should(Succeed(), "Failed to delete cluster role %s in %s cluster", cr.Name, MemberCluster.ClusterName) By("update member cluster in the hub cluster to join") Expect(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: mc.Name}, mc)).Should(Succeed(), "Failed to retrieve member cluster %s in %s cluster", mc.Name, HubCluster.ClusterName) @@ -175,14 +176,6 @@ var _ = Describe("workload orchestration testing with join/leave", Serial, Order AgentStatus: imcJoinedAgentStatus, Conditions: mcJoinedConditions, } - Eventually(func() error { - if err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: mc.Name}, mc); err != nil { - return err - } - if statusDiff := cmp.Diff(wantMCStatus, mc.Status, mcStatusCmpOptions...); statusDiff != "" { - return fmt.Errorf("member cluster(%s) status mismatch (-want +got):\n%s", mc.Name, statusDiff) - } - return nil - }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to wait for member cluster %s to have status %s", mc.Name, wantMCStatus) + utils.CheckMemberClusterStatus(ctx, *HubCluster, &types.NamespacedName{Name: mc.Name}, wantMCStatus, mcStatusCmpOptions) }) }) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 0ff01d8c6..34a3d691c 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -11,10 +11,9 @@ import ( "time" // Lint check prohibits non "_test" ending files to have dot imports for ginkgo / gomega. - "github.com/onsi/ginkgo/v2" + "github.com/google/go-cmp/cmp" "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -26,7 +25,7 @@ import ( "k8s.io/klog/v2" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" - "go.goms.io/fleet/apis/v1alpha1" + fleetv1alpha1 "go.goms.io/fleet/apis/v1alpha1" "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/test/e2e/framework" ) @@ -39,69 +38,39 @@ var ( ) // DeleteMemberCluster deletes MemberCluster in the hub cluster. -func DeleteMemberCluster(ctx context.Context, cluster framework.Cluster, mc *v1alpha1.MemberCluster) { +func DeleteMemberCluster(ctx context.Context, cluster framework.Cluster, mc *fleetv1alpha1.MemberCluster) { gomega.Expect(cluster.KubeClient.Delete(ctx, mc)).Should(gomega.Succeed(), "Failed to delete member cluster %s in %s cluster", mc.Name, cluster.ClusterName) gomega.Eventually(func() bool { return apierrors.IsNotFound(cluster.KubeClient.Get(ctx, types.NamespacedName{Name: mc.Name}, mc)) }, PollTimeout, PollInterval).Should(gomega.BeTrue(), "Failed to wait for member cluster %s to be deleted in %s cluster", mc.Name, cluster.ClusterName) } -// CreateClusterRole create cluster role in the hub cluster. -func CreateClusterRole(cluster framework.Cluster, cr *rbacv1.ClusterRole) { - ginkgo.By(fmt.Sprintf("Creating ClusterRole (%s)", cr.Name), func() { - err := cluster.KubeClient.Create(context.TODO(), cr) - gomega.Expect(err).Should(gomega.Succeed()) - }) -} - -// WaitClusterRole waits for cluster roles to be created. 
-func WaitClusterRole(cluster framework.Cluster, cr *rbacv1.ClusterRole) {
-	klog.Infof("Waiting for ClusterRole(%s) to be synced", cr.Name)
+// CheckMemberClusterStatus is used to check member cluster status.
+func CheckMemberClusterStatus(ctx context.Context, cluster framework.Cluster, objectKey *types.NamespacedName, wantMCStatus fleetv1alpha1.MemberClusterStatus, mcStatusCmpOptions []cmp.Option) {
+	gotMC := &fleetv1alpha1.MemberCluster{}
 	gomega.Eventually(func() error {
-		err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: cr.Name, Namespace: ""}, cr)
-		return err
-	}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
-}
-
-// DeleteClusterRole deletes cluster role on cluster.
-func DeleteClusterRole(cluster framework.Cluster, cr *rbacv1.ClusterRole) {
-	ginkgo.By(fmt.Sprintf("Deleting ClusterRole(%s)", cr.Name), func() {
-		err := cluster.KubeClient.Delete(context.TODO(), cr)
-		gomega.Expect(err).Should(gomega.Succeed())
-	})
+		if err := cluster.KubeClient.Get(ctx, types.NamespacedName{Name: objectKey.Name}, gotMC); err != nil {
+			return err
+		}
+		if statusDiff := cmp.Diff(wantMCStatus, gotMC.Status, mcStatusCmpOptions...); statusDiff != "" {
+			return fmt.Errorf("member cluster(%s) status mismatch (-want +got):\n%s", gotMC.Name, statusDiff)
+		}
+		return nil
+	}, PollTimeout, PollInterval).Should(gomega.Succeed(), "Failed to wait for member cluster %s to have status %s", gotMC.Name, wantMCStatus)
 }
 
-// CreateClusterResourcePlacement created ClusterResourcePlacement and waits for ClusterResourcePlacement to exist in hub cluster.
-func CreateClusterResourcePlacement(cluster framework.Cluster, crp *v1alpha1.ClusterResourcePlacement) {
-	ginkgo.By(fmt.Sprintf("Creating ClusterResourcePlacement(%s)", crp.Name), func() {
-		err := cluster.KubeClient.Create(context.TODO(), crp)
-		gomega.Expect(err).Should(gomega.Succeed())
-	})
-	klog.Infof("Waiting for ClusterResourcePlacement(%s) to be synced", crp.Name)
+// CheckInternalMemberClusterStatus is used to check internal member cluster status.
+func CheckInternalMemberClusterStatus(ctx context.Context, cluster framework.Cluster, objectKey *types.NamespacedName, wantIMCStatus fleetv1alpha1.InternalMemberClusterStatus, imcStatusCmpOptions []cmp.Option) {
+	gotIMC := &fleetv1alpha1.InternalMemberCluster{}
 	gomega.Eventually(func() error {
-		err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: crp.Name, Namespace: ""}, crp)
-		return err
-	}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
-}
-
-// WaitConditionClusterResourcePlacement waits for ClusterResourcePlacement to present on th hub cluster with a specific condition.
-func WaitConditionClusterResourcePlacement(cluster framework.Cluster, crp *v1alpha1.ClusterResourcePlacement,
-	conditionName string, status metav1.ConditionStatus, customTimeout time.Duration) {
-	klog.Infof("Waiting for ClusterResourcePlacement(%s) condition(%s) status(%s) to be synced", crp.Name, conditionName, status)
-	gomega.Eventually(func() bool {
-		err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: crp.Name, Namespace: ""}, crp)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		cond := crp.GetCondition(conditionName)
-		return cond != nil && cond.Status == status
-	}, customTimeout, PollInterval).Should(gomega.Equal(true))
-}
-
-// DeleteClusterResourcePlacement is used delete ClusterResourcePlacement on the hub cluster.
-func DeleteClusterResourcePlacement(cluster framework.Cluster, crp *v1alpha1.ClusterResourcePlacement) {
-	ginkgo.By(fmt.Sprintf("Deleting ClusterResourcePlacement(%s)", crp.Name), func() {
-		err := cluster.KubeClient.Delete(context.TODO(), crp)
-		gomega.Expect(err).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}))
-	})
+		if err := cluster.KubeClient.Get(ctx, types.NamespacedName{Name: objectKey.Name, Namespace: objectKey.Namespace}, gotIMC); err != nil {
+			return err
+		}
+		if statusDiff := cmp.Diff(wantIMCStatus, gotIMC.Status, imcStatusCmpOptions...); statusDiff != "" {
+			return fmt.Errorf("internal member cluster(%s) status mismatch (-want +got):\n%s", gotIMC.Name, statusDiff)
+		}
+		return nil
+	}, PollTimeout, PollInterval).Should(gomega.Succeed(), "Failed to wait for internal member cluster %s to have status %s", gotIMC.Name, wantIMCStatus)
 }
 
 // WaitWork waits for Work to be present on the hub cluster.
@@ -116,28 +85,12 @@ func WaitWork(ctx context.Context, cluster framework.Cluster, workName, workName
 	}, PollTimeout, PollInterval).Should(gomega.Succeed(), "Work %s not synced", name)
 }
 
-// CreateNamespace create namespace and waits for namespace to exist.
-func CreateNamespace(cluster framework.Cluster, ns *corev1.Namespace) {
-	ginkgo.By(fmt.Sprintf("Creating Namespace(%s)", ns.Name), func() {
-		err := cluster.KubeClient.Create(context.TODO(), ns)
-		gomega.Expect(err).Should(gomega.Succeed(), "Failed to create namespace %s", ns.Name)
-	})
-	klog.Infof("Waiting for Namespace(%s) to be synced", ns.Name)
-	gomega.Eventually(func() error {
-		err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: ns.Name, Namespace: ""}, ns)
-
-		return err
-	}, PollTimeout, PollInterval).Should(gomega.Succeed())
-}
-
-// DeleteNamespace delete namespace.
-func DeleteNamespace(cluster framework.Cluster, ns *corev1.Namespace) {
-	ginkgo.By(fmt.Sprintf("Deleting Namespace(%s)", ns.Name), func() {
-		err := cluster.KubeClient.Delete(context.TODO(), ns)
-		if err != nil && !apierrors.IsNotFound(err) {
-			gomega.Expect(err).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}))
-		}
-	})
+// DeleteNamespace deletes a namespace and waits for the deletion to complete.
+func DeleteNamespace(ctx context.Context, cluster framework.Cluster, ns *corev1.Namespace) {
+	gomega.Expect(cluster.KubeClient.Delete(ctx, ns)).Should(gomega.Succeed(), "Failed to delete namespace %s in %s cluster", ns.Name, cluster.ClusterName)
+	gomega.Eventually(func() bool {
+		return apierrors.IsNotFound(cluster.KubeClient.Get(ctx, types.NamespacedName{Name: ns.Name}, ns))
+	}, PollTimeout, PollInterval).Should(gomega.BeTrue(), "Failed to wait for namespace %s to be deleted in %s cluster", ns.Name, cluster.ClusterName)
 }
 
 // CreateWork creates Work object based on manifest given.
diff --git a/test/e2e/utils/workload_test_utils.go b/test/e2e/utils/workload_test_utils.go
new file mode 100644
index 000000000..3c4fe84c7
--- /dev/null
+++ b/test/e2e/utils/workload_test_utils.go
@@ -0,0 +1,101 @@
+/*
+Copyright (c) Microsoft Corporation.
+Licensed under the MIT license.
+*/
+
+package utils
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	// Lint check prohibits non "_test" ending files to have dot imports for ginkgo / gomega.
+ "github.com/google/go-cmp/cmp" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + + fleetv1alpha1 "go.goms.io/fleet/apis/v1alpha1" + "go.goms.io/fleet/test/e2e/framework" +) + +// CmpClusterRole compares actual cluster role with expected cluster role. +func CmpClusterRole(ctx context.Context, cluster framework.Cluster, objectKey *types.NamespacedName, wantClusterRole *rbacv1.ClusterRole, cmpOptions []cmp.Option) { + gotClusterRole := &rbacv1.ClusterRole{} + gomega.Eventually(func() error { + if err := cluster.KubeClient.Get(ctx, types.NamespacedName{Name: objectKey.Name}, gotClusterRole); err != nil { + return err + } + if diff := cmp.Diff(wantClusterRole, gotClusterRole, cmpOptions...); diff != "" { + return fmt.Errorf("cluster role(%s) mismatch (-want +got):\n%s", gotClusterRole.Name, diff) + } + return nil + }, PollTimeout, PollInterval).Should(gomega.Succeed(), "Failed to compare actual and expected cluster roles in %s cluster", cluster.ClusterName) +} + +// CmpNamespace compares actual namespace with expected namespace. +func CmpNamespace(ctx context.Context, cluster framework.Cluster, objectKey *types.NamespacedName, wantNamespace *corev1.Namespace, cmpOptions []cmp.Option) { + gotNamespace := &corev1.Namespace{} + gomega.Eventually(func() error { + if err := cluster.KubeClient.Get(ctx, types.NamespacedName{Name: objectKey.Name}, gotNamespace); err != nil { + return err + } + if diff := cmp.Diff(wantNamespace, gotNamespace, cmpOptions...); diff != "" { + return fmt.Errorf(" namespace(%s) mismatch (-want +got):\n%s", gotNamespace.Name, diff) + } + return nil + }, PollTimeout, PollInterval).Should(gomega.Succeed(), "Failed to compare actual and expected namespaces in %s cluster", cluster.ClusterName) +} + +// CmpRole compares actual role with expected role. +func CmpRole(ctx context.Context, cluster framework.Cluster, objectKey *types.NamespacedName, wantRole *rbacv1.Role, cmpOptions []cmp.Option) { + gotRole := &rbacv1.Role{} + gomega.Eventually(func() error { + if err := cluster.KubeClient.Get(ctx, types.NamespacedName{Name: objectKey.Name, Namespace: objectKey.Namespace}, gotRole); err != nil { + return err + } + if diff := cmp.Diff(wantRole, gotRole, cmpOptions...); diff != "" { + return fmt.Errorf("role(%s) mismatch (-want +got):\n%s", gotRole.Name, diff) + } + return nil + }, PollTimeout, PollInterval).Should(gomega.Succeed(), "Failed to compare actual and expected roles in %s cluster", cluster.ClusterName) +} + +// CmpRoleBinding compares actual role binding with expected role binding. +func CmpRoleBinding(ctx context.Context, cluster framework.Cluster, objectKey *types.NamespacedName, wantRoleBinding *rbacv1.RoleBinding, cmpOptions []cmp.Option) { + gotRoleBinding := &rbacv1.RoleBinding{} + gomega.Eventually(func() error { + if err := cluster.KubeClient.Get(ctx, types.NamespacedName{Name: objectKey.Name, Namespace: objectKey.Namespace}, gotRoleBinding); err != nil { + return err + } + if diff := cmp.Diff(wantRoleBinding, gotRoleBinding, cmpOptions...); diff != "" { + return fmt.Errorf("role binding(%s) mismatch (-want +got):\n%s", gotRoleBinding.Name, diff) + } + return nil + }, PollTimeout, PollInterval).Should(gomega.Succeed(), "Failed to compare actual and expected role bindings in %s cluster", cluster.ClusterName) +} + +// WaitCreateClusterResourcePlacementStatus waits for ClusterResourcePlacement to present on th hub cluster with a specific status. 
+func WaitCreateClusterResourcePlacementStatus(ctx context.Context, cluster framework.Cluster, objectKey *types.NamespacedName, wantCRPStatus fleetv1alpha1.ClusterResourcePlacementStatus, crpStatusCmpOptions []cmp.Option, customTimeout time.Duration) {
+	gotCRP := &fleetv1alpha1.ClusterResourcePlacement{}
+	gomega.Eventually(func() error {
+		if err := cluster.KubeClient.Get(ctx, types.NamespacedName{Name: objectKey.Name}, gotCRP); err != nil {
+			return err
+		}
+		if statusDiff := cmp.Diff(wantCRPStatus, gotCRP.Status, crpStatusCmpOptions...); statusDiff != "" {
+			return fmt.Errorf("cluster resource placement(%s) status mismatch (-want +got):\n%s", gotCRP.Name, statusDiff)
+		}
+		return nil
+	}, customTimeout, PollInterval).Should(gomega.Succeed(), "Failed to wait for cluster resource placement %s status to be updated in %s cluster", gotCRP.Name, cluster.ClusterName)
+}
+
+// DeleteClusterResourcePlacement is used to delete ClusterResourcePlacement on the hub cluster.
+func DeleteClusterResourcePlacement(ctx context.Context, cluster framework.Cluster, crp *fleetv1alpha1.ClusterResourcePlacement) {
+	gomega.Expect(cluster.KubeClient.Delete(ctx, crp)).Should(gomega.Succeed(), "Failed to delete cluster resource placement %s in %s cluster", crp.Name, cluster.ClusterName)
+	gomega.Eventually(func() bool {
+		return apierrors.IsNotFound(cluster.KubeClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp))
+	}, PollTimeout, PollInterval).Should(gomega.BeTrue(), "Failed to wait for cluster resource placement %s to be deleted in %s cluster", crp.Name, cluster.ClusterName)
+}
diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go
index d1a9312bf..e8449df8c 100644
--- a/test/e2e/work_api_e2e_test.go
+++ b/test/e2e/work_api_e2e_test.go
@@ -59,6 +59,7 @@ var _ = Describe("Work API Controller test", func() {
 		)
 
 		resourceNamespace *corev1.Namespace
+		workName          string
 	)
 
 	BeforeEach(func() {
@@ -71,17 +72,23 @@ var _ = Describe("Work API Controller test", func() {
 				Name: resourceNamespaceName,
 			},
 		}
-		testutils.CreateNamespace(*MemberCluster, resourceNamespace)
+		Expect(MemberCluster.KubeClient.Create(ctx, resourceNamespace)).Should(Succeed(), "Failed to create namespace %s in %s cluster", resourceNamespace.Name, MemberCluster.ClusterName)
 	})
 
 	AfterEach(func() {
-		testutils.DeleteNamespace(*MemberCluster, resourceNamespace)
+		testutils.DeleteNamespace(ctx, *MemberCluster, resourceNamespace)
 	})
 
 	Context("Work Creation Test", func() {
-		It("Upon successful work creation of a single resource, work manifest is applied and resource is created", func() {
-			workName := testutils.RandomWorkName(5)
+		BeforeEach(func() {
+			workName = testutils.RandomWorkName(5)
+		})
+
+		AfterEach(func() {
+			testutils.DeleteWork(ctx, *HubCluster, workapi.Work{ObjectMeta: metav1.ObjectMeta{Name: workName, Namespace: workNamespace.Name}})
+		})
 
+		It("Upon successful work creation of a single resource, work manifest is applied and resource is created", func() {
 			By(fmt.Sprintf("Here is the work Name %s", workName))
 
 			// Configmap will be included in this work object.
@@ -204,7 +211,6 @@ var _ = Describe("Work API Controller test", func() { }) It("Upon successful creation of 2 work resources with same manifest, work manifest is applied, and only 1 resource is created with merged owner references.", func() { - workNameOne := testutils.RandomWorkName(5) workNameTwo := testutils.RandomWorkName(5) manifestSecretName := "test-secret" @@ -225,7 +231,7 @@ var _ = Describe("Work API Controller test", func() { } // Creating types.NamespacedName to use in retrieving objects. - namespaceTypeOne := types.NamespacedName{Name: workNameOne, Namespace: workNamespace.Name} + namespaceTypeOne := types.NamespacedName{Name: workName, Namespace: workNamespace.Name} namespaceTypeTwo := types.NamespacedName{Name: workNameTwo, Namespace: workNamespace.Name} resourceNamespaceType := types.NamespacedName{Name: manifestSecretName, Namespace: resourceNamespace.Name} @@ -233,7 +239,7 @@ var _ = Describe("Work API Controller test", func() { manifests := testutils.AddManifests([]runtime.Object{&secret}, []workapi.Manifest{}) By(fmt.Sprintf("creating work %s of %s", namespaceTypeOne, manifestSecretName)) - testutils.CreateWork(ctx, *HubCluster, workNameOne, workNamespace.Name, manifests) + testutils.CreateWork(ctx, *HubCluster, workName, workNamespace.Name, manifests) By(fmt.Sprintf("creating work %s of %s", namespaceTypeTwo, manifestSecretName)) testutils.CreateWork(ctx, *HubCluster, workNameTwo, workNamespace.Name, manifests) @@ -320,7 +326,7 @@ var _ = Describe("Work API Controller test", func() { appliedWorkOne := workapi.AppliedWork{} Expect(MemberCluster.KubeClient.Get(ctx, namespaceTypeOne, &appliedWorkOne)).Should(Succeed(), - "Retrieving AppliedWork %s failed", workNameOne) + "Retrieving AppliedWork %s failed", workName) Expect(cmp.Diff(wantAppliedStatus, appliedWorkOne.Status, appliedWorkCmpOptions...)).Should(BeEmpty(), "Validate AppliedResourceMeta mismatch (-want, +got):") @@ -363,11 +369,11 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("Validating that the annotation of resource's spec exists on the resource %s", manifestSecretName)) Expect(retrievedSecret.ObjectMeta.Annotations[specHashAnnotation]).ToNot(BeEmpty(), "SpecHash Annotation does not exist for resource %s", secret.Name) + + testutils.DeleteWork(ctx, *HubCluster, workTwo) }) It("Upon successful work creation of a CRD resource, manifest is applied, and resources are created", func() { - workName := testutils.RandomWorkName(5) - // Name of the CRD object from the manifest file crdName := "testcrds.multicluster.x-k8s.io" crdObjectName := "test-crd-object" @@ -559,8 +565,6 @@ var _ = Describe("Work API Controller test", func() { }) It("Manifests with dependencies within different work objects should successfully apply", func() { - - workNameForNamespace := testutils.RandomWorkName(5) workNameForServiceAccount := testutils.RandomWorkName(6) testNamespace := corev1.Namespace{ @@ -587,10 +591,10 @@ var _ = Describe("Work API Controller test", func() { manifestNamespace := testutils.AddManifests([]runtime.Object{&testNamespace}, []workapi.Manifest{}) manifestServiceAccount := testutils.AddManifests([]runtime.Object{&testServiceAccount}, []workapi.Manifest{}) - workForNamespace := testutils.CreateWork(ctx, *HubCluster, workNameForNamespace, workNamespace.Name, manifestNamespace) + workForNamespace := testutils.CreateWork(ctx, *HubCluster, workName, workNamespace.Name, manifestNamespace) workForServiceAccount := testutils.CreateWork(ctx, *HubCluster, workNameForServiceAccount, 
workNamespace.Name, manifestServiceAccount) - By(fmt.Sprintf("Applied Condition should be set to True for Work %s and %s", workNameForNamespace, workNameForServiceAccount)) + By(fmt.Sprintf("Applied Condition should be set to True for Work %s and %s", workName, workNameForServiceAccount)) wantAppliedCondition := []metav1.Condition{ { @@ -603,7 +607,7 @@ var _ = Describe("Work API Controller test", func() { receivedWorkForNamespace := workapi.Work{} receivedWorkForServiceAccount := workapi.Work{} - namespaceTypeForNamespaceWork := types.NamespacedName{Name: workNameForNamespace, Namespace: workNamespace.Name} + namespaceTypeForNamespaceWork := types.NamespacedName{Name: workName, Namespace: workNamespace.Name} Eventually(func() string { if err := HubCluster.KubeClient.Get(ctx, namespaceTypeForNamespaceWork, &receivedWorkForNamespace); err != nil { @@ -674,7 +678,7 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s and %s", testNamespace.Name, testServiceAccount.Name)) appliedWorkForNamespace := workapi.AppliedWork{} Expect(MemberCluster.KubeClient.Get(ctx, namespaceTypeForNamespaceWork, &appliedWorkForNamespace)).Should(Succeed(), - "Retrieving AppliedWork %s failed", workNameForNamespace) + "Retrieving AppliedWork %s failed", workName) wantAppliedWorkConditionNamespace := workapi.AppliedtWorkStatus{ AppliedResources: []workapi.AppliedResourceMeta{ @@ -773,7 +777,10 @@ var _ = Describe("Work API Controller test", func() { "OwnerReference mismatch for resource %s (-want, +got):", testNamespace.Name) Expect(cmp.Diff(wantOwnerForServiceAccount, retrievedServiceAccount.OwnerReferences, cmpOptions...)).Should(BeEmpty(), "OwnerReference mismatch for resource %s (-want, +got):", testServiceAccount.Name) + + testutils.DeleteWork(ctx, *HubCluster, workForServiceAccount) }) + }) Context("Updating Work", func() { @@ -823,6 +830,10 @@ var _ = Describe("Work API Controller test", func() { }) + AfterEach(func() { + testutils.DeleteWork(ctx, *HubCluster, work) + }) + It("Updating Work object on the Hub Cluster should update the resource on the member cluster.", func() { updatedConfigMap := configMap.DeepCopy() updatedConfigMap.Data = map[string]string{ @@ -943,8 +954,9 @@ var _ = Describe("Work API Controller test", func() { By("Deleting the Work Object should also delete the resources in the member cluster") configMapDeleted := corev1.ConfigMap{} resourceNamespaceType := types.NamespacedName{Name: configMapBeforeDelete.Name, Namespace: resourceNamespace.Name} - Expect(MemberCluster.KubeClient.Get(ctx, resourceNamespaceType, &configMapDeleted)).Should(&utils.NotFoundMatcher{}, - "resource %s was either not deleted or encountered an error in cluster %s", configMapBeforeDelete.Name, MemberCluster.ClusterName) + Eventually(func() error { + return MemberCluster.KubeClient.Get(ctx, resourceNamespaceType, &configMapDeleted) + }, testutils.PollTimeout, testutils.PollInterval).Should(&utils.NotFoundMatcher{}, "resource %s was either not deleted or encountered an error in cluster %s", configMapBeforeDelete.Name, MemberCluster.ClusterName) }) }) }) diff --git a/test/e2e/work_load_test.go b/test/e2e/work_load_test.go index 4a31ff06c..bbb5c4af2 100644 --- a/test/e2e/work_load_test.go +++ b/test/e2e/work_load_test.go @@ -6,36 +6,36 @@ Licensed under the MIT license. package e2e import ( - "context" - + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" + workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" "go.goms.io/fleet/apis/v1alpha1" - testutils "go.goms.io/fleet/test/e2e/utils" + "go.goms.io/fleet/test/e2e/utils" ) var _ = Describe("workload orchestration testing", func() { - var crp *v1alpha1.ClusterResourcePlacement - var ctx context.Context - - BeforeEach(func() { - ctx = context.Background() - }) - - AfterEach(func() { - testutils.DeleteClusterResourcePlacement(*HubCluster, crp) - }) + var ( + crp *v1alpha1.ClusterResourcePlacement + labelKey = "fleet.azure.com/name" + labelValue = "test" + resourceIgnoreOptions = []cmp.Option{cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion", "UID", "Annotations", "CreationTimestamp", "ManagedFields"), + cmpopts.IgnoreFields(metav1.OwnerReference{}, "UID")} + ) Context("Test Workload Orchestration", func() { - It("Apply CRP and check if work gets propagated", func() { - workName := "resource-label-selector" - labelKey := "fleet.azure.com/name" - labelValue := "test" + It("Apply CRP and check if cluster role gets propagated, update cluster role", func() { By("create the resources to be propagated") - cr := &rbacv1.ClusterRole{ - ObjectMeta: v1.ObjectMeta{ + clusterRole := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ Name: "test-cluster-role", Labels: map[string]string{labelKey: labelValue}, }, @@ -47,38 +47,278 @@ var _ = Describe("workload orchestration testing", func() { }, }, } - testutils.CreateClusterRole(*HubCluster, cr) + Expect(HubCluster.KubeClient.Create(ctx, clusterRole)).Should(Succeed(), "Failed to create cluster role %s in %s cluster", clusterRole.Name, HubCluster.ClusterName) By("create the cluster resource placement in the hub cluster") crp = &v1alpha1.ClusterResourcePlacement{ - ObjectMeta: v1.ObjectMeta{Name: "resource-label-selector"}, + ObjectMeta: metav1.ObjectMeta{Name: "test-crp1"}, Spec: v1alpha1.ClusterResourcePlacementSpec{ ResourceSelectors: []v1alpha1.ClusterResourceSelector{ { Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole", - LabelSelector: &v1.LabelSelector{ - MatchLabels: map[string]string{"fleet.azure.com/name": "test"}, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: clusterRole.Labels, }, }, }, }, } - testutils.CreateClusterResourcePlacement(*HubCluster, crp) + Expect(HubCluster.KubeClient.Create(ctx, crp)).Should(Succeed(), "Failed to create cluster resource placement %s in %s cluster", crp.Name, HubCluster.ClusterName) By("check if work gets created for cluster resource placement") - testutils.WaitWork(ctx, *HubCluster, workName, memberNamespace.Name) + utils.WaitWork(ctx, *HubCluster, crp.Name, memberNamespace.Name) - By("check if cluster resource placement is updated to Scheduled & Applied") - testutils.WaitConditionClusterResourcePlacement(*HubCluster, crp, string(v1alpha1.ResourcePlacementConditionTypeScheduled), v1.ConditionTrue, testutils.PollTimeout) - testutils.WaitConditionClusterResourcePlacement(*HubCluster, crp, string(v1alpha1.ResourcePlacementStatusConditionTypeApplied), v1.ConditionTrue, testutils.PollTimeout) + By("check if cluster resource placement status is updated") + crpStatus := v1alpha1.ClusterResourcePlacementStatus{ + Conditions: []metav1.Condition{ + { + Reason: "ScheduleSucceeded", + Status: metav1.ConditionTrue, + 
Type: string(v1alpha1.ResourcePlacementConditionTypeScheduled), + }, + { + Reason: "ApplySucceeded", + Status: metav1.ConditionTrue, + Type: string(v1alpha1.ResourcePlacementStatusConditionTypeApplied), + }, + }, + SelectedResources: []v1alpha1.ResourceIdentifier{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Name: clusterRole.Name, + }, + }, + TargetClusters: []string{"kind-member-testing"}, + } + utils.WaitCreateClusterResourcePlacementStatus(ctx, *HubCluster, &types.NamespacedName{Name: crp.Name}, crpStatus, crpStatusCmpOptions, 3*utils.PollTimeout) + + By("check if cluster role is propagated to member cluster") + ownerReferences := []metav1.OwnerReference{ + { + APIVersion: workapi.GroupVersion.String(), + BlockOwnerDeletion: pointer.Bool(false), + Kind: "AppliedWork", + Name: crp.Name, + }, + } + expectedClusterRole := clusterRole + expectedClusterRole.OwnerReferences = ownerReferences + utils.CmpClusterRole(ctx, *MemberCluster, &types.NamespacedName{Name: clusterRole.Name}, expectedClusterRole, resourceIgnoreOptions) - By("check if resource is propagated to member cluster") - testutils.WaitClusterRole(*MemberCluster, cr) + By("update cluster role in Hub cluster") + rules := []rbacv1.PolicyRule{ + { + Verbs: []string{"get", "list", "watch"}, + APIGroups: []string{""}, + Resources: []string{"secrets"}, + }, + } + updatedClusterRole := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterRole.Name, + Labels: map[string]string{labelKey: labelValue, "fleet.azure.com/region": "us"}, + }, + Rules: rules, + } + Expect(HubCluster.KubeClient.Update(ctx, updatedClusterRole)).Should(Succeed(), "Failed to update cluster role %s in %s cluster", updatedClusterRole.Name, HubCluster.ClusterName) + + By("check if cluster role got updated in member cluster") + expectedClusterRole = &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-role", + Labels: updatedClusterRole.Labels, + OwnerReferences: ownerReferences, + }, + Rules: rules, + } + utils.CmpClusterRole(ctx, *MemberCluster, &types.NamespacedName{Name: clusterRole.Name}, expectedClusterRole, resourceIgnoreOptions) By("delete cluster role on hub cluster") - testutils.DeleteClusterRole(*HubCluster, cr) + Expect(HubCluster.KubeClient.Delete(ctx, clusterRole)).Should(Succeed(), "Failed to delete cluster role %s in %s cluster", clusterRole.Name, HubCluster.ClusterName) + Eventually(func() bool { + return errors.IsNotFound(HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: clusterRole.Name}, clusterRole)) + }, utils.PollTimeout, utils.PollInterval).Should(BeTrue(), "Failed to wait for cluster role %s to be deleted in %s cluster", clusterRole.Name, HubCluster.ClusterName) + + By("check if cluster role got deleted on member cluster") + Eventually(func() bool { + return errors.IsNotFound(MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: clusterRole.Name}, clusterRole)) + }, utils.PollTimeout, utils.PollInterval).Should(BeTrue(), "Failed to wait for cluster role %s to be deleted in %s cluster", clusterRole.Name, MemberCluster.ClusterName) + + By("delete cluster resource placement on hub cluster") + utils.DeleteClusterResourcePlacement(ctx, *HubCluster, crp) + }) + + It("Apply CRP selecting namespace by label and check if namespace gets propagated with role, role binding, then update existing role", func() { + By("create the resources to be propagated") + namespace1 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-namespace1", + Labels: 
map[string]string{labelKey: labelValue}, + }, + } + namespace2 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-namespace2", + }, + } + Expect(HubCluster.KubeClient.Create(ctx, namespace1)).Should(Succeed(), "Failed to create namespace %s in %s cluster", namespace1.Name, HubCluster.ClusterName) + Expect(HubCluster.KubeClient.Create(ctx, namespace2)).Should(Succeed(), "Failed to create namespace %s in %s cluster", namespace2.Name, HubCluster.ClusterName) + + role := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-reader", + Namespace: namespace1.Name, + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Verbs: []string{"get", "list", "watch"}, + Resources: []string{"pods"}, + }, + }, + } + Expect(HubCluster.KubeClient.Create(ctx, role)).Should(Succeed(), "Failed to create role %s in %s cluster", role.Name, HubCluster.ClusterName) + + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "read-pods", + Namespace: namespace1.Name, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "User", + Name: "jane", + APIGroup: "rbac.authorization.k8s.io", + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: role.Name, + APIGroup: "rbac.authorization.k8s.io", + }, + } + Expect(HubCluster.KubeClient.Create(ctx, roleBinding)).Should(Succeed(), "Failed to create role binding %s in %s cluster", roleBinding.Name, HubCluster.ClusterName) + + By("create the cluster resource placement in the hub cluster") + crp = &v1alpha1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{Name: "test-crp2"}, + Spec: v1alpha1.ClusterResourcePlacementSpec{ + ResourceSelectors: []v1alpha1.ClusterResourceSelector{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: namespace1.Labels, + }, + }, + }, + }, + } + Expect(HubCluster.KubeClient.Create(ctx, crp)).Should(Succeed(), "Failed to create cluster resource placement %s in %s cluster", crp.Name, HubCluster.ClusterName) + + By("check if work gets created for cluster resource placement") + utils.WaitWork(ctx, *HubCluster, crp.Name, memberNamespace.Name) + + By("check if cluster resource placement status is updated") + crpStatus := v1alpha1.ClusterResourcePlacementStatus{ + Conditions: []metav1.Condition{ + { + Reason: "ScheduleSucceeded", + Status: metav1.ConditionTrue, + Type: string(v1alpha1.ResourcePlacementConditionTypeScheduled), + }, + { + Reason: "ApplySucceeded", + Status: metav1.ConditionTrue, + Type: string(v1alpha1.ResourcePlacementStatusConditionTypeApplied), + }, + }, + SelectedResources: []v1alpha1.ResourceIdentifier{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "RoleBinding", + Name: roleBinding.Name, + Namespace: roleBinding.Namespace, + }, + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "Role", + Name: role.Name, + Namespace: role.Namespace, + }, + { + Version: "v1", + Kind: "Namespace", + Name: namespace1.Name, + }, + }, + TargetClusters: []string{"kind-member-testing"}, + } + utils.WaitCreateClusterResourcePlacementStatus(ctx, *HubCluster, &types.NamespacedName{Name: crp.Name}, crpStatus, crpStatusCmpOptions, 3*utils.PollTimeout) + + By("check if resources in namespace are propagated to member cluster") + ownerReferences := []metav1.OwnerReference{ + { + APIVersion: workapi.GroupVersion.String(), + BlockOwnerDeletion: pointer.Bool(false), + Kind: "AppliedWork", + Name: crp.Name, + }, + } + expectedNamespace := namespace1 + expectedRole := role + expectedRoleBinding := roleBinding + 
expectedNamespace.OwnerReferences = ownerReferences
+			expectedRole.OwnerReferences = ownerReferences
+			expectedRoleBinding.OwnerReferences = ownerReferences
+			utils.CmpNamespace(ctx, *MemberCluster, &types.NamespacedName{Name: namespace1.Name}, expectedNamespace, resourceIgnoreOptions)
+			utils.CmpRole(ctx, *MemberCluster, &types.NamespacedName{Name: role.Name, Namespace: role.Namespace}, expectedRole, resourceIgnoreOptions)
+			utils.CmpRoleBinding(ctx, *MemberCluster, &types.NamespacedName{Name: roleBinding.Name, Namespace: roleBinding.Namespace}, expectedRoleBinding, resourceIgnoreOptions)
+
+			By("check if namespace not selected by CRP doesn't exist on member cluster")
+			Consistently(func() bool {
+				return errors.IsNotFound(MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: namespace2.Name}, namespace2))
+			}, utils.PollTimeout, utils.PollInterval).Should(BeTrue(), "Failed to verify namespace %s is not propagated to %s cluster", namespace2.Name, MemberCluster.ClusterName)
+
+			By("update role in Hub cluster")
+			rules := []rbacv1.PolicyRule{
+				{
+					APIGroups: []string{""},
+					Verbs:     []string{"get", "list", "watch", "update"},
+					Resources: []string{"pods"},
+				},
+			}
+			updatedRole := &rbacv1.Role{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      role.Name,
+					Namespace: namespace1.Name,
+				},
+				Rules: rules,
+			}
+			Expect(HubCluster.KubeClient.Update(ctx, updatedRole)).Should(Succeed(), "Failed to update role %s in %s cluster", updatedRole.Name, HubCluster.ClusterName)
+			expectedRole.Rules = rules
+
+			By("check if role got updated in member cluster")
+			utils.CmpRole(ctx, *MemberCluster, &types.NamespacedName{Name: role.Name, Namespace: role.Namespace}, expectedRole, resourceIgnoreOptions)
+
+			By("delete namespaces")
+			utils.DeleteNamespace(ctx, *HubCluster, namespace1)
+			utils.DeleteNamespace(ctx, *HubCluster, namespace2)
+
+			By("check if namespace got deleted on member cluster")
+			Eventually(func() bool {
+				return errors.IsNotFound(MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: namespace1.Name}, namespace1))
+			}, utils.PollTimeout, utils.PollInterval).Should(BeTrue(), "Failed to wait for namespace %s to be deleted in %s cluster", namespace1.Name, MemberCluster.ClusterName)
+
+			By("delete cluster resource placement on hub cluster")
+			utils.DeleteClusterResourcePlacement(ctx, *HubCluster, crp)
+		})
 	})
 })
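
Taken together, the new helpers replace the per-spec Eventually blocks with declarative want/got checks. A minimal sketch of how they compose in a spec, assuming the suite's HubCluster/MemberCluster globals and the crpStatusCmpOptions/resourceIgnoreOptions defined in the tests above (clusterRole, crp, and expectedClusterRole are assumed to have been created earlier in the spec, as in work_load_test.go):

	// Wait for the CRP to report Scheduled/Applied with the expected selection.
	wantCRPStatus := v1alpha1.ClusterResourcePlacementStatus{
		Conditions: []metav1.Condition{
			{Reason: "ScheduleSucceeded", Status: metav1.ConditionTrue, Type: string(v1alpha1.ResourcePlacementConditionTypeScheduled)},
			{Reason: "ApplySucceeded", Status: metav1.ConditionTrue, Type: string(v1alpha1.ResourcePlacementStatusConditionTypeApplied)},
		},
		SelectedResources: []v1alpha1.ResourceIdentifier{
			{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole", Name: clusterRole.Name},
		},
		TargetClusters: []string{"kind-member-testing"},
	}
	utils.WaitCreateClusterResourcePlacementStatus(ctx, *HubCluster, &types.NamespacedName{Name: crp.Name}, wantCRPStatus, crpStatusCmpOptions, 3*utils.PollTimeout)

	// Verify the propagated resource on the member cluster, then clean up.
	utils.CmpClusterRole(ctx, *MemberCluster, &types.NamespacedName{Name: clusterRole.Name}, expectedClusterRole, resourceIgnoreOptions)
	utils.DeleteClusterResourcePlacement(ctx, *HubCluster, crp)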