diff --git a/Makefile b/Makefile
index 7fad28a8b..d8c3438d4 100644
--- a/Makefile
+++ b/Makefile
@@ -31,7 +31,7 @@ CONTROLLER_GEN_VER := v0.16.0
 CONTROLLER_GEN_BIN := controller-gen
 CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER))
 
-STATICCHECK_VER := 2024.1
+STATICCHECK_VER := 2025.1
 STATICCHECK_BIN := staticcheck
 STATICCHECK := $(abspath $(TOOLS_BIN_DIR)/$(STATICCHECK_BIN)-$(STATICCHECK_VER))
 
diff --git a/pkg/controllers/clusterresourceplacement/controller.go b/pkg/controllers/clusterresourceplacement/controller.go
index f5376fb16..c4ad27f3b 100644
--- a/pkg/controllers/clusterresourceplacement/controller.go
+++ b/pkg/controllers/clusterresourceplacement/controller.go
@@ -51,6 +51,10 @@ import (
 // if object size is greater than 1MB https://github.com/kubernetes/kubernetes/blob/db1990f48b92d603f469c1c89e2ad36da1b74846/test/integration/master/synthetic_master_test.go#L337
 var resourceSnapshotResourceSizeLimit = 800 * (1 << 10) // 800KB
 
+// We use a safety resync period to requeue all the finished requests just in case there is a bug in the system.
+// TODO: unify all the controllers with this pattern and make this configurable in place of the controller-runtime resync period.
+const controllerResyncPeriod = 15 * time.Minute
+
 func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ctrl.Result, error) {
 	name, ok := key.(string)
 	if !ok {
@@ -188,7 +192,8 @@ func (r *Reconciler) handleUpdate(ctx context.Context, crp *fleetv1beta1.Cluster
 			klog.ErrorS(updateErr, "Failed to update the status", "clusterResourcePlacement", crpKObj)
 			return ctrl.Result{}, controller.NewUpdateIgnoreConflictError(updateErr)
 		}
-		return ctrl.Result{}, err
+		// no need to retry faster; the user needs to fix the resource selectors
+		return ctrl.Result{RequeueAfter: controllerResyncPeriod}, nil
 	}
 
 	latestSchedulingPolicySnapshot, err := r.getOrCreateClusterSchedulingPolicySnapshot(ctx, crp, int(revisionLimit))
@@ -252,11 +257,11 @@ func (r *Reconciler) handleUpdate(ctx context.Context, crp *fleetv1beta1.Cluster
 		// Here we requeue the request to prevent a bug in the watcher.
 		klog.V(2).InfoS("Scheduler has not scheduled any cluster yet and requeue the request as a backup",
 			"clusterResourcePlacement", crpKObj, "scheduledCondition", crp.GetCondition(string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType)), "generation", crp.Generation)
-		return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil
+		return ctrl.Result{RequeueAfter: controllerResyncPeriod}, nil
 	}
 	klog.V(2).InfoS("Placement rollout has not finished yet and requeue the request", "clusterResourcePlacement", crpKObj, "status", crp.Status, "generation", crp.Generation)
 	// no need to requeue the request as the binding status will be changed but we add a long resync loop just in case.
-	return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil
+	return ctrl.Result{RequeueAfter: controllerResyncPeriod}, nil
 }
 
 func (r *Reconciler) getOrCreateClusterSchedulingPolicySnapshot(ctx context.Context, crp *fleetv1beta1.ClusterResourcePlacement, revisionHistoryLimit int) (*fleetv1beta1.ClusterSchedulingPolicySnapshot, error) {
diff --git a/pkg/controllers/clusterresourceplacement/resource_selector.go b/pkg/controllers/clusterresourceplacement/resource_selector.go
index c1df1838f..fc9cd73b9 100644
--- a/pkg/controllers/clusterresourceplacement/resource_selector.go
+++ b/pkg/controllers/clusterresourceplacement/resource_selector.go
@@ -48,8 +48,7 @@ func (r *Reconciler) selectResources(placement *fleetv1alpha1.ClusterResourcePla
 	}
 	placement.Status.SelectedResources = make([]fleetv1alpha1.ResourceIdentifier, 0)
 	manifests := make([]workv1alpha1.Manifest, len(selectedObjects))
-	for i, obj := range selectedObjects {
-		unstructuredObj := obj.DeepCopyObject().(*unstructured.Unstructured)
+	for i, unstructuredObj := range selectedObjects {
 		gvk := unstructuredObj.GroupVersionKind()
 		res := fleetv1alpha1.ResourceIdentifier{
 			Group: gvk.Group,
@@ -81,8 +80,9 @@ func convertResourceSelector(old []fleetv1alpha1.ClusterResourceSelector) []flee
 }
 
 // gatherSelectedResource gets all the resources according to the resource selector.
-func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv1beta1.ClusterResourceSelector) ([]runtime.Object, error) {
-	var resources []runtime.Object
+func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv1beta1.ClusterResourceSelector) ([]*unstructured.Unstructured, error) {
+	var resources []*unstructured.Unstructured
+	var resourceMap = make(map[fleetv1beta1.ResourceIdentifier]bool)
 	for _, selector := range selectors {
 		gvk := schema.GroupVersionKind{
 			Group: selector.Group,
@@ -104,7 +104,23 @@ func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv
 		if err != nil {
 			return nil, err
 		}
-		resources = append(resources, objs...)
+		for _, obj := range objs {
+			uObj := obj.(*unstructured.Unstructured)
+			ri := fleetv1beta1.ResourceIdentifier{
+				Group:     obj.GetObjectKind().GroupVersionKind().Group,
+				Version:   obj.GetObjectKind().GroupVersionKind().Version,
+				Kind:      obj.GetObjectKind().GroupVersionKind().Kind,
+				Name:      uObj.GetName(),
+				Namespace: uObj.GetNamespace(),
+			}
+			if _, exist := resourceMap[ri]; exist {
+				err = fmt.Errorf("found duplicate resource %+v", ri)
+				klog.ErrorS(err, "user selected one resource more than once", "resource", ri, "placement", placement)
+				return nil, controller.NewUserError(err)
+			}
+			resourceMap[ri] = true
+			resources = append(resources, uObj)
+		}
 	}
 	// sort the resources in strict order so that we will get the stable list of manifest so that
 	// the generated work object doesn't change between reconcile loops
@@ -113,16 +129,16 @@ func (r *Reconciler) gatherSelectedResource(placement string, selectors []fleetv
 	return resources, nil
 }
 
-func sortResources(resources []runtime.Object) {
+func sortResources(resources []*unstructured.Unstructured) {
 	sort.Slice(resources, func(i, j int) bool {
-		obj1 := resources[i].DeepCopyObject().(*unstructured.Unstructured)
-		obj2 := resources[j].DeepCopyObject().(*unstructured.Unstructured)
+		obj1 := resources[i]
+		obj2 := resources[j]
 		gvk1 := obj1.GetObjectKind().GroupVersionKind().String()
 		gvk2 := obj2.GetObjectKind().GroupVersionKind().String()
 		// compare group/version;kind for the rest of type of resources
 		gvkComp := strings.Compare(gvk1, gvk2)
 		if gvkComp == 0 {
-			// same gvk, compare namespace/name
+			// same gvk, compare namespace/name, no duplication exists
 			return strings.Compare(fmt.Sprintf("%s/%s", obj1.GetNamespace(), obj1.GetName()),
 				fmt.Sprintf("%s/%s", obj2.GetNamespace(), obj2.GetName())) > 0
 		}
@@ -442,8 +458,7 @@ func (r *Reconciler) selectResourcesForPlacement(placement *fleetv1beta1.Cluster
 	resources := make([]fleetv1beta1.ResourceContent, len(selectedObjects))
 	resourcesIDs := make([]fleetv1beta1.ResourceIdentifier, len(selectedObjects))
 
-	for i, obj := range selectedObjects {
-		unstructuredObj := obj.DeepCopyObject().(*unstructured.Unstructured)
+	for i, unstructuredObj := range selectedObjects {
 		rc, err := generateResourceContent(unstructuredObj)
 		if err != nil {
 			return 0, nil, nil, err
diff --git a/pkg/controllers/clusterresourceplacement/resource_selector_test.go b/pkg/controllers/clusterresourceplacement/resource_selector_test.go
index 033dcd51e..52d89b6d6 100644
--- a/pkg/controllers/clusterresourceplacement/resource_selector_test.go
+++ b/pkg/controllers/clusterresourceplacement/resource_selector_test.go
@@ -935,52 +935,52 @@ func TestSortResource(t *testing.T) {
 	}
 
 	tests := map[string]struct {
-		resources []runtime.Object
-		want      []runtime.Object
+		resources []*unstructured.Unstructured
+		want      []*unstructured.Unstructured
 	}{
 		"should handle empty resources list": {
-			resources: []runtime.Object{},
-			want:      []runtime.Object{},
+			resources: []*unstructured.Unstructured{},
+			want:      []*unstructured.Unstructured{},
 		},
 		"should handle single resource": {
-			resources: []runtime.Object{deployment},
-			want:      []runtime.Object{deployment},
+			resources: []*unstructured.Unstructured{deployment},
+			want:      []*unstructured.Unstructured{deployment},
 		},
 		"should handle multiple namespaces": {
-			resources: []runtime.Object{namespace1, namespace2},
-			want:      []runtime.Object{namespace2, namespace1},
+			resources: []*unstructured.Unstructured{namespace1, namespace2},
+			want:      []*unstructured.Unstructured{namespace2, namespace1},
		},
 		"should gather selected resources with Namespace in front with order": {
-			resources: []runtime.Object{deployment, namespace1, namespace2},
-			want:      []runtime.Object{namespace2, namespace1, deployment},
+			resources: []*unstructured.Unstructured{deployment, namespace1, namespace2},
+			want:      []*unstructured.Unstructured{namespace2, namespace1, deployment},
 		},
 		"should gather selected resources with CRD in front with order": {
-			resources: []runtime.Object{clusterRole, crd1, crd2},
-			want:      []runtime.Object{crd2, crd1, clusterRole},
+			resources: []*unstructured.Unstructured{clusterRole, crd1, crd2},
+			want:      []*unstructured.Unstructured{crd2, crd1, clusterRole},
 		},
 		"should gather selected resources with CRD or Namespace in front with order": {
-			resources: []runtime.Object{deployment, namespace1, namespace2, clusterRole, crd1, crd2},
-			want:      []runtime.Object{namespace2, namespace1, crd2, crd1, clusterRole, deployment},
+			resources: []*unstructured.Unstructured{deployment, namespace1, namespace2, clusterRole, crd1, crd2},
+			want:      []*unstructured.Unstructured{namespace2, namespace1, crd2, crd1, clusterRole, deployment},
 		},
 		"should gather selected resources with CRD or Namespace in front with order, second case": {
-			resources: []runtime.Object{crd1, crd2, deployment, namespace2, clusterRole},
-			want:      []runtime.Object{namespace2, crd2, crd1, deployment, clusterRole},
+			resources: []*unstructured.Unstructured{crd1, crd2, deployment, namespace2, clusterRole},
+			want:      []*unstructured.Unstructured{namespace2, crd2, crd1, deployment, clusterRole},
 		},
 		"should gather selected resources with PersistentVolumeClaim in front with order": {
-			resources: []runtime.Object{deployment, pvc, namespace1, role},
-			want:      []runtime.Object{namespace1, pvc, deployment, role},
+			resources: []*unstructured.Unstructured{deployment, pvc, namespace1, role},
+			want:      []*unstructured.Unstructured{namespace1, pvc, deployment, role},
 		},
 		"should gather selected resources with Secret in front with order": {
-			resources: []runtime.Object{deployment, secret, namespace1, crd1, namespace2, role},
-			want:      []runtime.Object{namespace2, namespace1, crd1, secret, deployment, role},
+			resources: []*unstructured.Unstructured{deployment, secret, namespace1, crd1, namespace2, role},
+			want:      []*unstructured.Unstructured{namespace2, namespace1, crd1, secret, deployment, role},
 		},
 		"should gather selected resources with ConfigMap and Secret in front with order": {
-			resources: []runtime.Object{deployment, secret, namespace1, role, configMap, secret2},
-			want:      []runtime.Object{namespace1, configMap, secret2, secret, deployment, role},
+			resources: []*unstructured.Unstructured{deployment, secret, namespace1, role, configMap, secret2},
+			want:      []*unstructured.Unstructured{namespace1, configMap, secret2, secret, deployment, role},
 		},
 		"should gather selected all the resources with the right order": {
-			resources: []runtime.Object{configMap, deployment, role, crd1, pvc, secret2, clusterRole, secret, namespace1, namespace2, crd2},
-			want:      []runtime.Object{namespace2, namespace1, crd2, crd1, configMap, secret2, secret, pvc, deployment, clusterRole, role},
+			resources: []*unstructured.Unstructured{configMap, deployment, role, crd1, pvc, secret2, clusterRole, secret, namespace1, namespace2, crd2},
+			want:      []*unstructured.Unstructured{namespace2, namespace1, crd2, crd1, configMap, secret2, secret, pvc, deployment, clusterRole, role},
 		},
 	}
diff --git a/test/e2e/placement_ro_test.go b/test/e2e/placement_ro_test.go
index e733a5bd8..9fe982d49 100644
--- a/test/e2e/placement_ro_test.go
+++ b/test/e2e/placement_ro_test.go
@@ -515,7 +515,7 @@ var _ = Context("creating resourceOverride with incorrect path", Ordered, func()
 	BeforeAll(func() {
 		By("creating work resources")
 		createWorkResources()
-		// Create the ro.
+		// Create the bad ro.
 		ro := &placementv1alpha1.ResourceOverride{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      roName,
@@ -544,7 +544,7 @@ var _ = Context("creating resourceOverride with incorrect path", Ordered, func()
 				},
 			},
 		}
-		By(fmt.Sprintf("creating resourceOverride %s", roName))
+		By(fmt.Sprintf("creating the bad resourceOverride %s", roName))
 		Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName)
 
 		// Create the CRP later so that failed override won't block the rollout
@@ -559,7 +559,7 @@ var _ = Context("creating resourceOverride with incorrect path", Ordered, func()
 		cleanupResourceOverride(roName, roNamespace)
 	})
 
-	It("should update CRP status as expected", func() {
+	It("should update CRP status as failed to override", func() {
 		wantRONames := []placementv1beta1.NamespacedName{
 			{Namespace: roNamespace, Name: fmt.Sprintf(placementv1alpha1.OverrideSnapshotNameFmt, roName, 0)},
 		}
@@ -840,3 +840,221 @@ var _ = Context("creating resourceOverride with delete configMap", Ordered, func
 	}, consistentlyDuration, consistentlyInterval).Should(BeTrue(), "Failed to delete work resources on member cluster %s", memberCluster.ClusterName)
 	})
 })
+
+var _ = Context("creating resourceOverride with templated rules with cluster label key replacement", Ordered, func() {
+	crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+	roName := fmt.Sprintf(roNameTemplate, GinkgoParallelProcess())
+	roNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess())
+
+	BeforeAll(func() {
+		By("creating work resources")
+		createWorkResources()
+
+		// Create the ro before crp so that the observed resource index is predictable.
+		ro := &placementv1alpha1.ResourceOverride{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      roName,
+				Namespace: roNamespace,
+			},
+			Spec: placementv1alpha1.ResourceOverrideSpec{
+				Placement: &placementv1alpha1.PlacementRef{
+					Name: crpName, // assigned CRP name
+				},
+				ResourceSelectors: configMapSelector(),
+				Policy: &placementv1alpha1.OverridePolicy{
+					OverrideRules: []placementv1alpha1.OverrideRule{
+						{
+							ClusterSelector: &placementv1beta1.ClusterSelector{
+								ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{
+									{
+										LabelSelector: &metav1.LabelSelector{
+											MatchExpressions: []metav1.LabelSelectorRequirement{
+												{
+													Key:      regionLabelName,
+													Operator: metav1.LabelSelectorOpExists,
+												},
+												{
+													Key:      envLabelName,
+													Operator: metav1.LabelSelectorOpExists,
+												},
+											},
+										},
+									},
+								},
+							},
+							JSONPatchOverrides: []placementv1alpha1.JSONPatchOverride{
+								{
+									Operator: placementv1alpha1.JSONPatchOverrideOpAdd,
+									Path:     "/data/region",
+									Value:    apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s%s}"`, placementv1alpha1.OverrideClusterLabelKeyVariablePrefix, regionLabelName))},
+								},
+								{
+									Operator: placementv1alpha1.JSONPatchOverrideOpReplace,
+									Path:     "/data/data",
+									Value:    apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"newdata-%s%s}"`, placementv1alpha1.OverrideClusterLabelKeyVariablePrefix, envLabelName))},
+								},
+							},
+						},
+					},
+				},
+			},
+		}
+		By(fmt.Sprintf("creating resourceOverride %s", roName))
+		Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName)
+
+		// Create the CRP.
+		createCRP(crpName)
+	})
+
+	AfterAll(func() {
+		By(fmt.Sprintf("deleting placement %s and related resources", crpName))
+		ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters)
+
+		By(fmt.Sprintf("deleting resourceOverride %s", roName))
+		cleanupResourceOverride(roName, roNamespace)
+	})
+
+	It("should update CRP status as expected", func() {
+		wantRONames := []placementv1beta1.NamespacedName{
+			{Namespace: roNamespace, Name: fmt.Sprintf(placementv1alpha1.OverrideSnapshotNameFmt, roName, 0)},
+		}
+		crpStatusUpdatedActual := crpStatusWithOverrideUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, "0", nil, wantRONames)
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName)
+	})
+
+	It("should replace the cluster label key in the configMap", func() {
+		cmName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess())
+		cmNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess())
+		for _, cluster := range allMemberClusters {
+			wantConfigMap := &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      cmName,
+					Namespace: cmNamespace,
+				},
+				Data: map[string]string{
+					"data":   fmt.Sprintf("newdata-%s", labelsByClusterName[cluster.ClusterName][envLabelName]),
+					"region": labelsByClusterName[cluster.ClusterName][regionLabelName],
+				},
+			}
+			configMapActual := configMapPlacedOnClusterActual(cluster, wantConfigMap)
+			Eventually(configMapActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update configmap %s data as expected", cmName)
+		}
+	})
+
+	It("should handle non-existent cluster label key gracefully", func() {
+		By("Update the ResourceOverride to use a non-existent label key")
+		Eventually(func() error {
+			ro := &placementv1alpha1.ResourceOverride{}
+			if err := hubClient.Get(ctx, types.NamespacedName{Name: roName, Namespace: roNamespace}, ro); err != nil {
+				return err
+			}
+			ro.Spec.Policy.OverrideRules[0].JSONPatchOverrides[0].Value = apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%snon-existent-label}"`, placementv1alpha1.OverrideClusterLabelKeyVariablePrefix))}
+			return hubClient.Update(ctx, ro)
+		}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update resourceOverride %s with non-existent label key", roName)
+
+		By("Verify the CRP status should have one cluster failed to override while the rest stuck in rollout")
+		// TODO: need to construct the expected status
+
+		By("Verify the configMap remains unchanged")
+		cmName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess())
+		cmNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess())
+		for _, cluster := range allMemberClusters {
+			wantConfigMap := &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      cmName,
+					Namespace: cmNamespace,
+				},
+				Data: map[string]string{
+					"data":   fmt.Sprintf("newdata-%s", labelsByClusterName[cluster.ClusterName][envLabelName]),
+					"region": labelsByClusterName[cluster.ClusterName][regionLabelName],
+				},
+			}
+			configMapActual := configMapPlacedOnClusterActual(cluster, wantConfigMap)
+			Consistently(configMapActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "ConfigMap %s should remain unchanged", cmName)
+		}
+	})
+})
+
+var _ = Context("creating resourceOverride with a non-existent label", Ordered, func() {
+	crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+	roName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess())
+	roNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess())
+
+	BeforeAll(func() {
+		By("creating work resources")
+		createWorkResources()
+		// Create the bad ro.
+		ro := &placementv1alpha1.ResourceOverride{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      roName,
+				Namespace: roNamespace,
+			},
+			Spec: placementv1alpha1.ResourceOverrideSpec{
+				Placement: &placementv1alpha1.PlacementRef{
+					Name: crpName, // assigned CRP name
+				},
+				ResourceSelectors: configMapSelector(),
+				Policy: &placementv1alpha1.OverridePolicy{
+					OverrideRules: []placementv1alpha1.OverrideRule{
+						{
+							ClusterSelector: &placementv1beta1.ClusterSelector{
+								ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{
+									{
+										LabelSelector: &metav1.LabelSelector{
+											MatchExpressions: []metav1.LabelSelectorRequirement{
+												{
+													Key:      regionLabelName,
+													Operator: metav1.LabelSelectorOpExists,
+												},
+												{
+													Key:      envLabelName,
+													Operator: metav1.LabelSelectorOpExists,
+												},
+											},
+										},
+									},
+								},
+							},
+							JSONPatchOverrides: []placementv1alpha1.JSONPatchOverride{
+								{
+									Operator: placementv1alpha1.JSONPatchOverrideOpAdd,
+									Path:     "/data/region",
+									Value:    apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"%s%s}"`, placementv1alpha1.OverrideClusterLabelKeyVariablePrefix, "non-existent-label"))},
+								},
+								{
+									Operator: placementv1alpha1.JSONPatchOverrideOpReplace,
+									Path:     "/data/data",
+									Value:    apiextensionsv1.JSON{Raw: []byte(fmt.Sprintf(`"newdata-%s%s}"`, placementv1alpha1.OverrideClusterLabelKeyVariablePrefix, envLabelName))},
+								},
+							},
+						},
+					},
+				},
+			},
+		}
+		By(fmt.Sprintf("creating the bad resourceOverride %s", roName))
+		Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName)
+
+		// Create the CRP later so that failed override won't block the rollout
+		createCRP(crpName)
+	})
+
+	AfterAll(func() {
+		By(fmt.Sprintf("deleting placement %s and related resources", crpName))
+		ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters)
+
+		By(fmt.Sprintf("deleting resourceOverride %s", roName))
+		cleanupResourceOverride(roName, roNamespace)
+	})
+
+	It("should update CRP status as failed to override", func() {
+		wantRONames := []placementv1beta1.NamespacedName{
+			{Namespace: roNamespace, Name: fmt.Sprintf(placementv1alpha1.OverrideSnapshotNameFmt, roName, 0)},
+		}
+		crpStatusUpdatedActual := crpStatusWithOverrideUpdatedFailedActual(workResourceIdentifiers(), allMemberClusterNames, "0", nil, wantRONames)
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName)
+	})
+
+	// This check will ignore the annotation of resources.
+ It("should not place the selected resources on member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) +}) diff --git a/test/e2e/placement_selecting_resources_test.go b/test/e2e/placement_selecting_resources_test.go index f1c9fb036..931704bad 100644 --- a/test/e2e/placement_selecting_resources_test.go +++ b/test/e2e/placement_selecting_resources_test.go @@ -685,6 +685,93 @@ var _ = Describe("validating CRP when selecting a reserved resource", Ordered, f }) }) +var _ = Describe("When creating a pickN ClusterResourcePlacement with duplicated resources", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + var crp *placementv1beta1.ClusterResourcePlacement + var existingNS corev1.Namespace + BeforeAll(func() { + By("creating work resources on hub cluster") + createWorkResources() + existingNS = appNamespace() + By("Create a crp that selects the same resource twice") + crp = &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: []placementv1beta1.ClusterResourceSelector{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: "Namespace", + Name: existingNS.Name, + }, + { + Group: corev1.GroupName, + Version: "v1", + Kind: "Namespace", + Name: existingNS.Name, + }, + }, + }, + } + By(fmt.Sprintf("creating placement %s", crpName)) + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) + }) + + AfterAll(func() { + By(fmt.Sprintf("garbage all things related to placement %s", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + It("should update CRP status as expected", func() { + Eventually(func() error { + gotCRP := &placementv1beta1.ClusterResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, gotCRP); err != nil { + return err + } + wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionFalse, + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: clusterresourceplacement.InvalidResourceSelectorsReason, + ObservedGeneration: 1, + }, + }, + } + if diff := cmp.Diff(gotCRP.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + It("updating the CRP to select one namespace", func() { + gotCRP := &placementv1beta1.ClusterResourcePlacement{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, gotCRP)).Should(Succeed(), "Failed to get CRP %s", crpName) + gotCRP.Spec.ResourceSelectors = []placementv1beta1.ClusterResourceSelector{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: "Namespace", + Name: existingNS.Name, + }, + } + Expect(hubClient.Update(ctx, gotCRP)).To(Succeed(), "Failed to update CRP %s", crpName) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) + }) + + It("should 
place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) +}) + var _ = Describe("validating CRP when failed to apply resources", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) var existingNS corev1.Namespace