From b487f81439b9f2088ce0ec7094363388992b6027 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Fri, 29 Aug 2025 16:51:00 +1000 Subject: [PATCH 1/9] Minor fixes Signed-off-by: michaelawyu --- test/e2e/enveloped_object_placement_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index 8825fb3da..587ca2cd7 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -795,7 +795,7 @@ func createWrappedResourcesForEnvelopTest() { // Create ResourceEnvelope with ResourceQuota inside quotaBytes, err := json.Marshal(testResourceQuota) Expect(err).Should(Succeed()) - testResourceEnvelope.Data["resourceQuota1.yaml"] = runtime.RawExtension{Raw: quotaBytes} + testResourceEnvelope.Data["resourceQuota.yaml"] = runtime.RawExtension{Raw: quotaBytes} deploymentBytes, err := json.Marshal(testDeployment) Expect(err).Should(Succeed()) testResourceEnvelope.Data["deployment.yaml"] = runtime.RawExtension{Raw: deploymentBytes} From 588e5a99e6f58fe6f662dd816d859b6a934d07b2 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Sat, 30 Aug 2025 02:50:02 +1000 Subject: [PATCH 2/9] (Re-)enabled parallel processing in the work applier Signed-off-by: michaelawyu --- pkg/controllers/workapplier/process.go | 53 ++++++-- pkg/controllers/workapplier/waves.go | 180 +++++++++++++++++++++++++ 2 files changed, 225 insertions(+), 8 deletions(-) create mode 100644 pkg/controllers/workapplier/waves.go diff --git a/pkg/controllers/workapplier/process.go b/pkg/controllers/workapplier/process.go index 646433b1f..73e7e228a 100644 --- a/pkg/controllers/workapplier/process.go +++ b/pkg/controllers/workapplier/process.go @@ -37,15 +37,52 @@ func (r *Reconciler) processManifests( work *fleetv1beta1.Work, expectedAppliedWorkOwnerRef *metav1.OwnerReference, ) { - // TODO: We have to apply the namespace/crd/secret/configmap/pvc first - // then we can process some of the manifests in parallel. - for _, bundle := range bundles { - if bundle.applyOrReportDiffErr != nil { - // Skip a manifest if it has failed pre-processing. - continue + // Process all manifests in parallel. + // + // There are cases where certain groups of manifests should not be processed in parallel with + // each other (e.g., a config map must be applied after its owner namespace is applied); + // to address this situation, manifests are processed in waves: manifests in the same wave are + // processed in parallel, while different waves are processed sequentially. + + // As a special case, if the ReportDiff mode is on, all manifests are processed in parallel in + // one wave. + if work.Spec.ApplyStrategy != nil && work.Spec.ApplyStrategy.Type == fleetv1beta1.ApplyStrategyTypeReportDiff { + doWork := func(piece int) { + if bundles[piece].applyOrReportDiffErr != nil { + // Skip a manifest if it has failed pre-processing. 
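+				// Its pre-processing error is already recorded in the bundle; nothing more to do here.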
+ return + } + + r.processOneManifest(ctx, bundles[piece], work, expectedAppliedWorkOwnerRef) + klog.V(2).InfoS("Processed a manifest", "manifestObj", klog.KObj(bundles[piece].manifestObj), "work", klog.KObj(work)) } - r.processOneManifest(ctx, bundle, work, expectedAppliedWorkOwnerRef) - klog.V(2).InfoS("Processed a manifest", "manifestObj", klog.KObj(bundle.manifestObj), "work", klog.KObj(work)) + + r.parallelizer.ParallelizeUntil(ctx, len(bundles), doWork, "processingManifestsInReportDiffMode") + return + } + + // Organize the bundles into different waves of bundles for parallel processing based on their + // GVR information. + processingWaves := organizeBundlesIntoProcessingWaves(bundles, klog.KObj(work)) + for idx := range processingWaves { + bundlesInWave := processingWaves[idx].bundles + + // TO-DO (chenyu1): evaluate if there is a way to avoid repeated closure + // assignment just for capturing variables. + doWork := func(piece int) { + if bundlesInWave[piece].applyOrReportDiffErr != nil { + // Skip a manifest if it has failed pre-processing. + // + // This added as a sanity check as the organization step normally + // would have already skipped all the manifests with processing failures. + return + } + + r.processOneManifest(ctx, bundlesInWave[piece], work, expectedAppliedWorkOwnerRef) + klog.V(2).InfoS("Processed a manifest", "manifestObj", klog.KObj(bundlesInWave[piece].manifestObj), "work", klog.KObj(work)) + } + + r.parallelizer.ParallelizeUntil(ctx, len(bundlesInWave), doWork, "processingManifests") } } diff --git a/pkg/controllers/workapplier/waves.go b/pkg/controllers/workapplier/waves.go new file mode 100644 index 000000000..6433584be --- /dev/null +++ b/pkg/controllers/workapplier/waves.go @@ -0,0 +1,180 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workapplier + +import ( + "slices" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" +) + +type waveNumber int + +const ( + lastWave waveNumber = 999 +) + +var ( + // The default wave number for all known Kubernetes resource type. + // + // Note (chenyu1): the waves below are based on the Helm resource installation + // order (see also the Helm source code). Similar objects are grouped together + // to achieve best performance. + defaultWaveNumberByResourceType = map[string]waveNumber{ + // Apply namespaces and priority classes first. + "namespaces": 0, + "priorityclasses": 0, + // Apply policies, configuration data, and other static resources second. + "networkpolicies": 1, + "resourcequotas": 1, + "limitranges": 1, + "podsecuritypolicies": 1, + "poddisruptionbudgets": 1, + "serviceaccounts": 1, + "secrets": 1, + "configmaps": 1, + "storageclasses": 1, + "persistentvolumes": 1, + "persistentvolumeclaims": 1, + "customresourcedefinitions": 1, + "ingressclasses": 1, + // Apply RBAC resources (cluster roles and roles). + "clusterroles": 2, + "roles": 2, + // Apply RBAC resources (cluster role bindings and role bindings). 
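+		// Bindings follow the roles they reference by one wave, mirroring the Helm installation order noted above.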
+ "clusterrolebindings": 3, + "rolebindings": 3, + // Apply workloads and services. + "services": 4, + "daemonsets": 4, + "pods": 4, + "replicationcontrollers": 4, + "replicasets": 4, + "deployments": 4, + "horizontalpodautoscalers": 4, + "statefulsets": 4, + "jobs": 4, + "cronjobs": 4, + "ingresses": 4, + // Apply API services and webhooks. + "apiservices": 5, + "validatingwebhookconfigurations": 5, + "mutatingwebhookconfigurations": 5, + } + + // The API groups for all known Kubernetes resource types. + knownAPIGroups = sets.New( + // The core API group. + "", + // The networking API group (`networking.k8s.io`). + "networking.k8s.io", + // The scheduling API group (`scheduling.k8s.io`). + "scheduling.k8s.io", + // The policy API group (`policy.k8s.io`). + "policy", + // The storage API group (`storage.k8s.io`). + "storage.k8s.io", + // The API extensions API group (`apiextensions.k8s.io`). + "apiextensions.k8s.io", + // The RBAC authorization API group (`rbac.authorization.k8s.io`). + "rbac.authorization.k8s.io", + // The apps API group (`apps`). + "apps", + // The autoscaling API group (`autoscaling`). + "autoscaling", + // The API registration API group (`apiregistration.k8s.io`). + "apiregistration.k8s.io", + // The admission registration API group (`admissionregistration.k8s.io`). + "admissionregistration.k8s.io", + // The batch API group (`batch`). + "batch", + ) +) + +// bundleProcessingWave is a wave of bundles that can be processed in parallel. +type bundleProcessingWave struct { + num waveNumber + bundles []*manifestProcessingBundle +} + +// organizeBundlesIntoProcessingWaves organizes the list of bundles into different +// waves of bundles for parallel processing based on their GVR information. +func organizeBundlesIntoProcessingWaves(bundles []*manifestProcessingBundle, workRef klog.ObjectRef) []*bundleProcessingWave { + // Pre-allocate the map; 7 is the total count of default wave numbers, though + // not all wave numbers might be used. + waveByNum := make(map[waveNumber]*bundleProcessingWave, 7) + + getOrAddWave := func(num waveNumber) *bundleProcessingWave { + wave, ok := waveByNum[num] + if !ok { + wave = &bundleProcessingWave{ + num: num, + // Pre-allocate with a reasonable size. + bundles: make([]*manifestProcessingBundle, 0, 5), + } + waveByNum[num] = wave + } + return wave + } + + // For simplicity reasons, the organization itself runs in sequential order. + // Considering that the categorization itself is quick and the total number of bundles + // should be limited in most cases, this should not introduce significant overhead. + for idx := range bundles { + bundle := bundles[idx] + if bundle.gvr == nil { + // For manifest data that cannot be decoded, there might not be any available GVR + // information. Skip such processing bundles; this is not considered as an error. + klog.V(2).InfoS("Skipping a bundle with no GVR; no wave is assigned", + "ordinal", idx, "work", workRef) + continue + } + + if bundle.applyOrReportDiffErr != nil { + // An error has occurred before this step; such bundles need no further processing, + // skip them. This is not considered as an error. 
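+			// (Typical sources are manifest decoding failures and REST mapping lookup failures.)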
+ klog.V(2).InfoS("Skipping a bundle with prior processing error; no wave is assigned", + "manifestObj", klog.KObj(bundle.manifestObj), "GVR", *bundle.gvr, "work", workRef) + continue + } + + waveNum := lastWave + defaultWaveNum, foundInDefaultWaveNumber := defaultWaveNumberByResourceType[bundle.gvr.Resource] + if foundInDefaultWaveNumber && knownAPIGroups.Has(bundle.gvr.Group) { + // The resource is a known one; assign the bundle to its default wave. + waveNum = defaultWaveNum + } + + wave := getOrAddWave(waveNum) + wave.bundles = append(wave.bundles, bundle) + klog.V(2).InfoS("Assigned manifest to a wave", + "waveNumber", waveNum, + "manifestObj", klog.KObj(bundle.manifestObj), "GVR", *bundle.gvr, "work", workRef) + } + + // Retrieve all the waves and sort them by their wave number. + waves := make([]*bundleProcessingWave, 0, len(waveByNum)) + for _, w := range waveByNum { + waves = append(waves, w) + } + // Sort the waves in ascending order. + slices.SortFunc(waves, func(a, b *bundleProcessingWave) int { + return int(a.num) - int(b.num) + }) + return waves +} From 504bf47860300f08b00976551868ac1fe4b4e418 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Tue, 2 Sep 2025 00:18:28 +1000 Subject: [PATCH 3/9] Added unit tests Signed-off-by: michaelawyu --- pkg/controllers/workapplier/process.go | 2 +- pkg/controllers/workapplier/waves_test.go | 1120 +++++++++++++++++++++ 2 files changed, 1121 insertions(+), 1 deletion(-) create mode 100644 pkg/controllers/workapplier/waves_test.go diff --git a/pkg/controllers/workapplier/process.go b/pkg/controllers/workapplier/process.go index 73e7e228a..d58c183c2 100644 --- a/pkg/controllers/workapplier/process.go +++ b/pkg/controllers/workapplier/process.go @@ -67,7 +67,7 @@ func (r *Reconciler) processManifests( for idx := range processingWaves { bundlesInWave := processingWaves[idx].bundles - // TO-DO (chenyu1): evaluate if there is a way to avoid repeated closure + // TO-DO (chenyu1): evaluate if there is a need to avoid repeated closure // assignment just for capturing variables. doWork := func(piece int) { if bundlesInWave[piece].applyOrReportDiffErr != nil { diff --git a/pkg/controllers/workapplier/waves_test.go b/pkg/controllers/workapplier/waves_test.go new file mode 100644 index 000000000..fcd67977b --- /dev/null +++ b/pkg/controllers/workapplier/waves_test.go @@ -0,0 +1,1120 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workapplier + +import ( + "fmt" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" + + "github.com/google/go-cmp/cmp" + fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" +) + +// TestOrganizeBundlesIntoProcessingWaves tests the organizeBundlesIntoProcessingWaves function. 
+func TestOrganizeBundlesIntoProcessingWaves(t *testing.T) { + work := &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + }, + } + workRef := klog.KObj(work) + + testCases := []struct { + name string + bundles []*manifestProcessingBundle + wantWaves []*bundleProcessingWave + }{ + { + name: "single bundle (known resource) into single wave", + bundles: []*manifestProcessingBundle{ + { + // Note: the IDs are added here purely for identification reasons; they are + // not consistent with other parts of the bundle, and do not reflect + // how the work applier actually sees bundles in an actual processing run. + // + // The same applies to other test cases in this spec. + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + // Note: the version part does not matter, as Kubernetes requires that one + // resource must be uniquely identified by the combo of its API group, resource type, + // namespace (if applicable), and name, but not the version. The information + // here is added for completeness reasons. + // + // The same applies to other tests cases in this spec. + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + }, + wantWaves: []*bundleProcessingWave{ + { + num: 0, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + }, + }, + }, + }, + { + // Normally this test case will never occur; it is added for completeness reasons. + name: "bundle with decoding errors (no GVR)", + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + }, + gvr: nil, + }, + }, + wantWaves: []*bundleProcessingWave{ + { + num: 0, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + }, + }, + }, + }, + { + name: "bundle with decoding errors (invalid JSON)", + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + gvr: &schema.GroupVersionResource{}, + applyOrReportDiffErr: fmt.Errorf("failed to unmarshal JSON"), + }, + }, + wantWaves: []*bundleProcessingWave{ + { + num: 0, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + }, + }, + }, + }, + { + name: "bundle with decoding errors (unregistered API)", + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + gvr: &schema.GroupVersionResource{}, + applyOrReportDiffErr: fmt.Errorf("failed to find GVR from member cluster client REST mapping"), + }, + }, + wantWaves: []*bundleProcessingWave{ + { + num: 0, + bundles: []*manifestProcessingBundle{ + { + id: 
&fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + }, + }, + }, + }, + { + name: "bundle with unknown resource type", + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "placeholders", + }, + }, + }, + wantWaves: []*bundleProcessingWave{ + { + num: 0, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + }, + }, + { + num: lastWave, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "placeholders", + }, + }, + }, + }, + }, + }, + { + name: "bundle with known resource type but different API group", + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + gvr: &schema.GroupVersionResource{ + Group: "dummy", + Version: "v1", + Resource: "deployments", + }, + }, + }, + wantWaves: []*bundleProcessingWave{ + { + num: 0, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + }, + }, + { + num: lastWave, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + gvr: &schema.GroupVersionResource{ + Group: "dummy", + Version: "v1", + Resource: "deployments", + }, + }, + }, + }, + }, + }, + { + name: "bundle with unknown resource type from unknown API group", + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + gvr: &schema.GroupVersionResource{ + Group: "dummy", + Version: "v10", + Resource: "placeholders", + }, + }, + }, + wantWaves: []*bundleProcessingWave{ + { + num: 0, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + }, + }, + { + num: lastWave, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + gvr: &schema.GroupVersionResource{ + Group: "dummy", + Version: "v10", + Resource: "placeholders", + }, + }, + }, + }, + }, + }, + { + name: "mixed", + // The bundles below feature all known resource types from known API groups + // (in reverse order by wave number), plus unknown resources and bundles with errors. 
+ bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "admissionregistration.k8s.io", + Version: "v1", + Resource: "mutatingwebhookconfigurations", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + gvr: &schema.GroupVersionResource{ + Group: "admissionregistration.k8s.io", + Version: "v1", + Resource: "validatingwebhookconfigurations", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + }, + gvr: &schema.GroupVersionResource{ + Group: "apiregistration.k8s.io", + Version: "v1", + Resource: "apiservices", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 3, + }, + gvr: &schema.GroupVersionResource{ + Group: "networking.k8s.io", + Version: "v1", + Resource: "ingresses", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 4, + }, + gvr: &schema.GroupVersionResource{ + Group: "batch", + Version: "v1", + Resource: "cronjobs", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 5, + }, + gvr: &schema.GroupVersionResource{ + Group: "batch", + Version: "v1", + Resource: "jobs", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 6, + }, + gvr: &schema.GroupVersionResource{ + Group: "autoscaling", + Version: "v1", + Resource: "horizontalpodautoscalers", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 7, + }, + gvr: &schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "statefulsets", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 8, + }, + gvr: &schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "deployments", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 9, + }, + gvr: &schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "replicasets", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 10, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "replicationcontrollers", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 11, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "pods", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 12, + }, + gvr: &schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "daemonsets", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 13, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "services", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 14, + }, + gvr: &schema.GroupVersionResource{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Resource: "rolebindings", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 15, + }, + gvr: &schema.GroupVersionResource{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Resource: "clusterrolebindings", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 16, + }, + gvr: &schema.GroupVersionResource{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Resource: "roles", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 17, + }, + gvr: &schema.GroupVersionResource{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Resource: "clusterroles", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 18, + }, + gvr: &schema.GroupVersionResource{ + Group: "networking.k8s.io", + Version: "v1", 
+ Resource: "ingressclasses", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 19, + }, + gvr: &schema.GroupVersionResource{ + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 20, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "persistentvolumeclaims", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 21, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "persistentvolumes", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 22, + }, + gvr: &schema.GroupVersionResource{ + Group: "storage.k8s.io", + Version: "v1", + Resource: "storageclasses", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 23, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "configmaps", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 24, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "secrets", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 25, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "serviceaccounts", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 26, + }, + gvr: &schema.GroupVersionResource{ + Group: "policy", + Version: "v1", + Resource: "poddisruptionbudgets", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 27, + }, + gvr: &schema.GroupVersionResource{ + Group: "policy", + Version: "v1", + Resource: "podsecuritypolicies", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 28, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "limitranges", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 29, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "resourcequotas", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 30, + }, + gvr: &schema.GroupVersionResource{ + Group: "networking.k8s.io", + Version: "v1", + Resource: "networkpolicies", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 31, + }, + gvr: &schema.GroupVersionResource{ + Group: "scheduling.k8s.io", + Version: "v1", + Resource: "priorityclasses", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 32, + }, + gvr: &schema.GroupVersionResource{ + Group: "dummy", + Version: "v10", + Resource: "placeholders", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 33, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 34, + }, + gvr: &schema.GroupVersionResource{}, + applyOrReportDiffErr: fmt.Errorf("failed to find GVR from member cluster client REST mapping"), + }, + }, + wantWaves: []*bundleProcessingWave{ + { + num: 0, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 31, + }, + gvr: &schema.GroupVersionResource{ + Group: "scheduling.k8s.io", + Version: "v1", + Resource: "priorityclasses", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 33, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + }, + }, + }, + }, + { + num: 1, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + 
Ordinal: 18, + }, + gvr: &schema.GroupVersionResource{ + Group: "networking.k8s.io", + Version: "v1", + Resource: "ingressclasses", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 19, + }, + gvr: &schema.GroupVersionResource{ + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 20, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "persistentvolumeclaims", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 21, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "persistentvolumes", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 22, + }, + gvr: &schema.GroupVersionResource{ + Group: "storage.k8s.io", + Version: "v1", + Resource: "storageclasses", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 23, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "configmaps", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 24, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "secrets", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 25, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "serviceaccounts", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 26, + }, + gvr: &schema.GroupVersionResource{ + Group: "policy", + Version: "v1", + Resource: "poddisruptionbudgets", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 27, + }, + gvr: &schema.GroupVersionResource{ + Group: "policy", + Version: "v1", + Resource: "podsecuritypolicies", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 28, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "limitranges", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 29, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "resourcequotas", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 30, + }, + gvr: &schema.GroupVersionResource{ + Group: "networking.k8s.io", + Version: "v1", + Resource: "networkpolicies", + }, + }, + }, + }, + { + num: 2, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 16, + }, + gvr: &schema.GroupVersionResource{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Resource: "roles", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 17, + }, + gvr: &schema.GroupVersionResource{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Resource: "clusterroles", + }, + }, + }, + }, + { + num: 3, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 14, + }, + gvr: &schema.GroupVersionResource{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Resource: "rolebindings", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 15, + }, + gvr: &schema.GroupVersionResource{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Resource: "clusterrolebindings", + }, + }, + }, + }, + { + num: 4, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 3, + }, + gvr: &schema.GroupVersionResource{ + Group: "networking.k8s.io", + Version: "v1", + Resource: "ingresses", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 
4, + }, + gvr: &schema.GroupVersionResource{ + Group: "batch", + Version: "v1", + Resource: "cronjobs", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 5, + }, + gvr: &schema.GroupVersionResource{ + Group: "batch", + Version: "v1", + Resource: "jobs", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 6, + }, + gvr: &schema.GroupVersionResource{ + Group: "autoscaling", + Version: "v1", + Resource: "horizontalpodautoscalers", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 7, + }, + gvr: &schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "statefulsets", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 8, + }, + gvr: &schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "deployments", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 9, + }, + gvr: &schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "replicasets", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 10, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "replicationcontrollers", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 11, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "pods", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 12, + }, + gvr: &schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "daemonsets", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 13, + }, + gvr: &schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "services", + }, + }, + }, + }, + { + num: 5, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + }, + gvr: &schema.GroupVersionResource{ + Group: "admissionregistration.k8s.io", + Version: "v1", + Resource: "mutatingwebhookconfigurations", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + }, + gvr: &schema.GroupVersionResource{ + Group: "admissionregistration.k8s.io", + Version: "v1", + Resource: "validatingwebhookconfigurations", + }, + }, + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + }, + gvr: &schema.GroupVersionResource{ + Group: "apiregistration.k8s.io", + Version: "v1", + Resource: "apiservices", + }, + }, + }, + }, + { + num: lastWave, + bundles: []*manifestProcessingBundle{ + { + id: &fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 32, + }, + gvr: &schema.GroupVersionResource{ + Group: "dummy", + Version: "v10", + Resource: "placeholders", + }, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + waves := organizeBundlesIntoProcessingWaves(tc.bundles, workRef) + if diff := cmp.Diff( + waves, tc.wantWaves, + cmp.AllowUnexported(manifestProcessingBundle{}, bundleProcessingWave{}), + ); diff != "" { + t.Errorf("organized waves mismatch (-got, +want):\n%s", diff) + } + }) + } +} From 04b072c721f0fbe29427163cbb0b29e470517a8f Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Thu, 28 Aug 2025 13:38:22 +0800 Subject: [PATCH 4/9] test: add RP apply strategy tests (#226) --------- Signed-off-by: Zhiying Lin Signed-off-by: michaelawyu --- test/e2e/actuals_test.go | 76 +- test/e2e/placement_apply_strategy_test.go | 10 +- test/e2e/placement_cro_test.go | 6 +- test/e2e/placement_drift_diff_test.go | 14 +- test/e2e/placement_ro_test.go | 6 +- 
.../resource_placement_apply_strategy_test.go | 1433 +++++++++++++++++ test/e2e/resource_placement_pickall_test.go | 22 +- test/e2e/resource_placement_ro_test.go | 238 +-- test/e2e/resources_test.go | 12 +- test/e2e/updaterun_test.go | 4 +- test/e2e/utils_test.go | 135 +- 11 files changed, 1687 insertions(+), 269 deletions(-) create mode 100644 test/e2e/resource_placement_apply_strategy_test.go diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index 93ae85545..506916bc9 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -111,7 +111,7 @@ func validateConfigMapOnCluster(cluster *framework.Cluster, name types.Namespace return nil } -func validateOverrideAnnotationOfConfigMapOnCluster(cluster *framework.Cluster, wantAnnotations map[string]string) error { +func validateAnnotationOfConfigMapOnCluster(cluster *framework.Cluster, wantAnnotations map[string]string) error { workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) @@ -519,6 +519,41 @@ func crpRolloutPendingDueToExternalStrategyConditions(generation int64) []metav1 } } +func rpAppliedFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementAppliedConditionType), + Status: metav1.ConditionFalse, + Reason: condition.ApplyFailedReason, + ObservedGeneration: generation, + }, + } +} + func crpAppliedFailedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { @@ -599,6 +634,45 @@ func crpNotAvailableConditions(generation int64, hasOverride bool) []metav1.Cond } } +func rpDiffReportedConditions(generation int64, hasOverride bool) []metav1.Condition { + overrideConditionReason := condition.OverrideNotSpecifiedReason + if hasOverride { + overrideConditionReason = condition.OverriddenSucceededReason + } + return []metav1.Condition{ + { + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Status: metav1.ConditionTrue, + Reason: scheduler.FullyScheduledReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementRolloutStartedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.RolloutStartedReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementOverriddenConditionType), + Status: metav1.ConditionTrue, + Reason: overrideConditionReason, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ResourcePlacementWorkSynchronizedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: generation, + }, + { + Type: 
string(placementv1beta1.ResourcePlacementDiffReportedConditionType), + Status: metav1.ConditionTrue, + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: generation, + }, + } +} + func crpDiffReportedConditions(generation int64, hasOverride bool) []metav1.Condition { overrideConditionReason := condition.OverrideNotSpecifiedReason if hasOverride { diff --git a/test/e2e/placement_apply_strategy_test.go b/test/e2e/placement_apply_strategy_test.go index 8fa41b0d4..93848c7f4 100644 --- a/test/e2e/placement_apply_strategy_test.go +++ b/test/e2e/placement_apply_strategy_test.go @@ -77,7 +77,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { // Create the CRP. strategy := &placementv1beta1.ApplyStrategy{AllowCoOwnership: true} - createCRPWithApplyStrategy(crpName, strategy) + createCRPWithApplyStrategy(crpName, strategy, nil) }) AfterAll(func() { @@ -136,7 +136,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { // Create the CRP. strategy := &placementv1beta1.ApplyStrategy{AllowCoOwnership: false} - createCRPWithApplyStrategy(crpName, strategy) + createCRPWithApplyStrategy(crpName, strategy, nil) }) AfterAll(func() { @@ -189,7 +189,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { Type: placementv1beta1.ApplyStrategyTypeServerSideApply, AllowCoOwnership: false, } - createCRPWithApplyStrategy(crpName, strategy) + createCRPWithApplyStrategy(crpName, strategy, nil) }) AfterAll(func() { @@ -242,7 +242,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { Type: placementv1beta1.ApplyStrategyTypeServerSideApply, AllowCoOwnership: false, } - createCRPWithApplyStrategy(crpName, strategy) + createCRPWithApplyStrategy(crpName, strategy, nil) }) AfterAll(func() { @@ -371,7 +371,7 @@ var _ = Describe("validating CRP when resources exists", Ordered, func() { ServerSideApplyConfig: &placementv1beta1.ServerSideApplyConfig{ForceConflicts: true}, AllowCoOwnership: true, } - createCRPWithApplyStrategy(crpName, strategy) + createCRPWithApplyStrategy(crpName, strategy, nil) }) AfterAll(func() { diff --git a/test/e2e/placement_cro_test.go b/test/e2e/placement_cro_test.go index fb08032d3..0419e29c8 100644 --- a/test/e2e/placement_cro_test.go +++ b/test/e2e/placement_cro_test.go @@ -391,7 +391,7 @@ var _ = Context("creating clusterResourceOverride with different rules for each for i, cluster := range allMemberClusters { wantAnnotations := map[string]string{croTestAnnotationKey: fmt.Sprintf("%s-%d", croTestAnnotationValue, i)} Expect(validateAnnotationOfWorkNamespaceOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of work namespace on %s", cluster.ClusterName) - Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) + Expect(validateAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) } }) }) @@ -466,7 +466,7 @@ var _ = Context("creating clusterResourceOverride with different rules for each for _, cluster := range allMemberClusters { wantAnnotations := map[string]string{croTestAnnotationKey: fmt.Sprintf("test-%s", cluster.ClusterName)} Expect(validateAnnotationOfWorkNamespaceOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of work namespace on %s", cluster.ClusterName) - 
Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) + Expect(validateAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) } }) }) @@ -691,7 +691,7 @@ var _ = Context("creating clusterResourceOverride with delete rules for one clus cluster := allMemberClusters[idx] wantAnnotations := map[string]string{croTestAnnotationKey1: croTestAnnotationValue1} Expect(validateAnnotationOfWorkNamespaceOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of work namespace on %s", cluster.ClusterName) - Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) + Expect(validateAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) } }) diff --git a/test/e2e/placement_drift_diff_test.go b/test/e2e/placement_drift_diff_test.go index 1c0b56448..d3934e6ab 100644 --- a/test/e2e/placement_drift_diff_test.go +++ b/test/e2e/placement_drift_diff_test.go @@ -74,7 +74,7 @@ var _ = Describe("take over existing resources", func() { ComparisonOption: placementv1beta1.ComparisonOptionTypePartialComparison, WhenToTakeOver: placementv1beta1.WhenToTakeOverTypeAlways, } - createCRPWithApplyStrategy(crpName, applyStrategy) + createCRPWithApplyStrategy(crpName, applyStrategy, nil) }) It("should update CRP status as expected", func() { @@ -174,7 +174,7 @@ var _ = Describe("take over existing resources", func() { ComparisonOption: placementv1beta1.ComparisonOptionTypePartialComparison, WhenToTakeOver: placementv1beta1.WhenToTakeOverTypeIfNoDiff, } - createCRPWithApplyStrategy(crpName, applyStrategy) + createCRPWithApplyStrategy(crpName, applyStrategy, nil) }) It("should update CRP status as expected", func() { @@ -346,7 +346,7 @@ var _ = Describe("take over existing resources", func() { ComparisonOption: placementv1beta1.ComparisonOptionTypeFullComparison, WhenToTakeOver: placementv1beta1.WhenToTakeOverTypeIfNoDiff, } - createCRPWithApplyStrategy(crpName, applyStrategy) + createCRPWithApplyStrategy(crpName, applyStrategy, nil) }) It("should update CRP status as expected", func() { @@ -526,7 +526,7 @@ var _ = Describe("detect drifts on placed resources", func() { ComparisonOption: placementv1beta1.ComparisonOptionTypeFullComparison, WhenToApply: placementv1beta1.WhenToApplyTypeAlways, } - createCRPWithApplyStrategy(crpName, applyStrategy) + createCRPWithApplyStrategy(crpName, applyStrategy, nil) }) It("should update CRP status as expected", func() { @@ -665,7 +665,7 @@ var _ = Describe("detect drifts on placed resources", func() { ComparisonOption: placementv1beta1.ComparisonOptionTypePartialComparison, WhenToApply: placementv1beta1.WhenToApplyTypeIfNotDrifted, } - createCRPWithApplyStrategy(crpName, applyStrategy) + createCRPWithApplyStrategy(crpName, applyStrategy, nil) }) It("should update CRP status as expected", func() { @@ -849,7 +849,7 @@ var _ = Describe("detect drifts on placed resources", func() { ComparisonOption: placementv1beta1.ComparisonOptionTypeFullComparison, WhenToApply: placementv1beta1.WhenToApplyTypeIfNotDrifted, } - createCRPWithApplyStrategy(crpName, applyStrategy) + createCRPWithApplyStrategy(crpName, applyStrategy, nil) }) It("should 
update CRP status as expected", func() { @@ -1079,7 +1079,7 @@ var _ = Describe("report diff mode", func() { Type: placementv1beta1.ApplyStrategyTypeReportDiff, WhenToTakeOver: placementv1beta1.WhenToTakeOverTypeNever, } - createCRPWithApplyStrategy(crpName, applyStrategy) + createCRPWithApplyStrategy(crpName, applyStrategy, nil) }) It("should update CRP status as expected", func() { diff --git a/test/e2e/placement_ro_test.go b/test/e2e/placement_ro_test.go index e4c662c0e..87e0f181d 100644 --- a/test/e2e/placement_ro_test.go +++ b/test/e2e/placement_ro_test.go @@ -383,7 +383,7 @@ var _ = Context("creating resourceOverride with different rules for each cluster It("should have override annotations on the configmap", func() { for i, cluster := range allMemberClusters { wantAnnotations := map[string]string{roTestAnnotationKey: fmt.Sprintf("%s-%d", roTestAnnotationValue, i)} - Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) + Expect(validateAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) } }) }) @@ -498,7 +498,7 @@ var _ = Context("creating resourceOverride and clusterResourceOverride, resource want := map[string]string{croTestAnnotationKey: croTestAnnotationValue} for _, cluster := range allMemberClusters { Expect(validateAnnotationOfWorkNamespaceOnCluster(cluster, want)).Should(Succeed(), "Failed to override the annotation of work namespace on %s", cluster.ClusterName) - Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, want)).ShouldNot(Succeed(), "ResourceOverride Should win, ClusterResourceOverride annotated on $s", cluster.ClusterName) + Expect(validateAnnotationOfConfigMapOnCluster(cluster, want)).ShouldNot(Succeed(), "ResourceOverride Should win, ClusterResourceOverride annotated on $s", cluster.ClusterName) } }) }) @@ -837,7 +837,7 @@ var _ = Context("creating resourceOverride with delete configMap", Ordered, func for idx := 0; idx < 2; idx++ { cluster := allMemberClusters[idx] wantAnnotations := map[string]string{roTestAnnotationKey: roTestAnnotationValue} - Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) + Expect(validateAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) } }) diff --git a/test/e2e/resource_placement_apply_strategy_test.go b/test/e2e/resource_placement_apply_strategy_test.go new file mode 100644 index 000000000..51ac4614b --- /dev/null +++ b/test/e2e/resource_placement_apply_strategy_test.go @@ -0,0 +1,1433 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "crypto/rand" + "encoding/base64" + "fmt" + + "github.com/google/go-cmp/cmp" + . 
"github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/controllers/workapplier" +) + +var _ = Describe("validating resource placement using different apply strategies", Label("resourceplacement"), func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + annotationKey := "annotation-key" + annotationValue := "annotation-value" + annotationUpdatedValue := "annotation-updated-value" + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + configMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + anotherOwnerReference := metav1.OwnerReference{} + + BeforeEach(OncePerOrdered, func() { + // Create the resources. + createNamespace() + + // Create the CRP with Namespace-only selector. + createNamespaceOnlyCRP(crpName) + + By("should update CRP status as expected") + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Describe("validating RP when resources exists", Ordered, func() { + BeforeAll(func() { + By("creating configMap on hub cluster") + createConfigMap() + + By("creating owner reference for the configmap") + anotherOwnerReference = createAnotherValidOwnerReferenceForConfigMap(workNamespaceName, fmt.Sprintf("owner-configmap-%d", GinkgoParallelProcess())) + }) + + AfterAll(func() { + By("deleting created configMap on hub cluster") + cleanupConfigMap() + + By("deleting owner reference configmap") + cleanupAnotherValidOwnerReferenceForConfigMap(workNamespaceName, anotherOwnerReference.Name) + }) + + Context("Test a RP place objects successfully (client-side-apply and allow co-own)", Ordered, func() { + BeforeAll(func() { + cm := appConfigMap() + cm.SetOwnerReferences([]metav1.OwnerReference{ + anotherOwnerReference, + }) + cm.Annotations = map[string]string{ + annotationKey: annotationValue, + } + By(fmt.Sprintf("creating configmap %s/%s on member cluster", cm.Namespace, cm.Name)) + Expect(allMemberClusters[0].KubeClient.Create(ctx, &cm)).Should(Succeed(), "Failed to create configmap %s/%s", cm.Namespace, cm.Name) + + // Create the RP. 
+ strategy := &placementv1beta1.ApplyStrategy{AllowCoOwnership: true} + createRPWithApplyStrategy(workNamespaceName, rpName, strategy) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s/%s", workNamespaceName, rpName)) + cleanupPlacement(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}) + + By("deleting created config map on member cluster") + cleanupConfigMapOnCluster(allMemberClusters[0]) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have annotations on the configmap", func() { + want := map[string]string{annotationKey: annotationValue} + Expect(validateAnnotationOfConfigMapOnCluster(memberCluster1EastProd, want)).Should(Succeed(), "Failed to override the annotation of work configmap on %s", memberCluster1EastProdName) + }) + + It("can delete the RP", func() { + // Delete the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespaceName, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s", rpName) + }) + + It("should remove placed resources from member clusters excluding the first one", func() { + checkIfRemovedConfigMapFromMemberClusters(allMemberClusters[1:]) + }) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Namespace: workNamespaceName, Name: rpName}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s", rpName) + }) + + It("configmap should be kept on member cluster", func() { + checkConfigMapExistsWithOwnerRefOnMemberCluster(workNamespaceName, configMapName, rpName) + }) + }) + + Context("Test a RP place objects successfully (client-side-apply and disallow co-own) and existing resource has no owner reference", Ordered, func() { + BeforeAll(func() { + cm := appConfigMap() + cm.Annotations = map[string]string{ + annotationKey: annotationValue, + } + By(fmt.Sprintf("creating configmap %s/%s on member cluster", cm.Namespace, cm.Name)) + Expect(allMemberClusters[0].KubeClient.Create(ctx, &cm)).Should(Succeed(), "Failed to create configmap %s/%s", cm.Namespace, cm.Name) + + // Create the RP. + strategy := &placementv1beta1.ApplyStrategy{AllowCoOwnership: false} + createRPWithApplyStrategy(workNamespaceName, rpName, strategy) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s/%s", workNamespaceName, rpName)) + cleanupPlacement(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. 
+ It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have annotations on the configmap", func() { + want := map[string]string{annotationKey: annotationValue} + Expect(validateAnnotationOfConfigMapOnCluster(memberCluster1EastProd, want)).Should(Succeed(), "Failed to override the annotation of work configmap on %s", memberCluster1EastProdName) + }) + + It("can delete the RP", func() { + // Delete the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespaceName, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s", rpName) + }) + + It("should remove the selected resources on member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s", rpName) + }) + }) + + Context("Test a RP place objects successfully (server-side-apply and disallow co-own) and existing resource has no owner reference", Ordered, func() { + BeforeAll(func() { + cm := appConfigMap() + cm.Annotations = map[string]string{ + annotationKey: annotationValue, + } + By(fmt.Sprintf("creating configmap %s/%s on member cluster", cm.Namespace, cm.Name)) + Expect(allMemberClusters[0].KubeClient.Create(ctx, &cm)).Should(Succeed(), "Failed to create configmap %s/%s", cm.Namespace, cm.Name) + + // Create the RP. + strategy := &placementv1beta1.ApplyStrategy{ + Type: placementv1beta1.ApplyStrategyTypeServerSideApply, + AllowCoOwnership: false, + } + createRPWithApplyStrategy(workNamespaceName, rpName, strategy) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s/%s", workNamespaceName, rpName)) + cleanupPlacement(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have annotations on the configmap", func() { + want := map[string]string{annotationKey: annotationValue} + Expect(validateAnnotationOfConfigMapOnCluster(memberCluster1EastProd, want)).Should(Succeed(), "Failed to override the annotation of work configmap on %s", memberCluster1EastProdName) + }) + + It("can delete the RP", func() { + // Delete the RP. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespaceName, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s", rpName) + }) + + It("should remove the selected resources on member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s", rpName) + }) + }) + + Context("Test a RP fail to apply configmap (server-side-apply and disallow co-own) and existing resource is owned by others", Ordered, func() { + BeforeAll(func() { + cm := appConfigMap() + cm.SetOwnerReferences([]metav1.OwnerReference{ + anotherOwnerReference, + }) + By(fmt.Sprintf("creating configmap %s/%s on member cluster", cm.Namespace, cm.Name)) + Expect(allMemberClusters[0].KubeClient.Create(ctx, &cm)).Should(Succeed(), "Failed to create configmap %s/%s", cm.Namespace, cm.Name) + + // Create the RP. + strategy := &placementv1beta1.ApplyStrategy{ + Type: placementv1beta1.ApplyStrategyTypeServerSideApply, + AllowCoOwnership: false, + } + createRPWithApplyStrategy(workNamespaceName, rpName, strategy) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s/%s", workNamespaceName, rpName)) + cleanupPlacement(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}) + + By("deleting created config map on member cluster") + cleanupConfigMapOnCluster(allMemberClusters[0]) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, rp); err != nil { + return err + } + + wantStatus := placementv1beta1.PlacementStatus{ + Conditions: rpAppliedFailedConditions(rp.Generation), + PerClusterPlacementStatuses: []placementv1beta1.PerClusterPlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + FailedPlacements: []placementv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Kind: "ConfigMap", + Name: configMapName, + Version: "v1", + Namespace: workNamespaceName, + }, + Condition: metav1.Condition{ + Type: placementv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), + ObservedGeneration: 0, + }, + }, + }, + Conditions: perClusterApplyFailedConditions(rp.Generation), + }, + { + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: perClusterRolloutCompletedConditions(rp.Generation, true, false), + }, + { + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: perClusterRolloutCompletedConditions(rp.Generation, true, false), + }, + }, + SelectedResources: appConfigMapIdentifiers(), + ObservedResourceIndex: "0", + } + if diff := cmp.Diff(rp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("RP status diff (-got, +want): %s", diff) + } + return nil + } + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should 
place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("can delete the RP", func() { + // Delete the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespaceName, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s", rpName) + }) + + It("should remove placed resources from member clusters excluding the first one", func() { + checkIfRemovedConfigMapFromMemberClusters(allMemberClusters[1:]) + }) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s", rpName) + }) + + It("configmap should be kept on member cluster", func() { + checkConfigMapExistsWithOwnerRefOnMemberCluster(workNamespaceName, configMapName, rpName) + }) + }) + + Context("Test a RP able to apply configmap when the conflicted annotation is managed by others (force server-side-apply and allow co-own)", Ordered, func() { + BeforeAll(func() { + cm := appConfigMap() + cm.SetOwnerReferences([]metav1.OwnerReference{ + anotherOwnerReference, + }) + cm.Annotations = map[string]string{ + annotationKey: annotationValue, + } + options := client.CreateOptions{FieldManager: e2eTestFieldManager} + By(fmt.Sprintf("creating configmap %s/%s on member cluster", cm.Namespace, cm.Name)) + Expect(allMemberClusters[0].KubeClient.Create(ctx, &cm, &options)).Should(Succeed(), "Failed to create configmap %s/%s", cm.Namespace, cm.Name) + + By(fmt.Sprintf("updating configmap %s/%s annotation on hub cluster", cm.Namespace, cm.Name)) + Expect(hubClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: workNamespaceName}, &cm)).Should(Succeed(), "Failed to get configmap %s/%s", workNamespaceName, configMapName) + cm.Annotations = map[string]string{ + annotationKey: annotationUpdatedValue, + } + Expect(hubClient.Update(ctx, &cm)).Should(Succeed(), "Failed to update configmap %s/%s", workNamespaceName, configMapName) + + // Create the RP. + strategy := &placementv1beta1.ApplyStrategy{ + Type: placementv1beta1.ApplyStrategyTypeServerSideApply, + ServerSideApplyConfig: &placementv1beta1.ServerSideApplyConfig{ForceConflicts: true}, + AllowCoOwnership: true, + } + createRPWithApplyStrategy(workNamespaceName, rpName, strategy) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s/%s", workNamespaceName, rpName)) + cleanupPlacement(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}) + + By("deleting created config map on member cluster") + cleanupConfigMapOnCluster(allMemberClusters[0]) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + // This check will ignore the annotation of resources. 
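+		// Because ForceConflicts is set, the work applier is expected to claim the conflicting annotation field from the other field manager, so the hub value should win on every cluster, including the first one.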
+ It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have updated annotations on the configmap of all clusters", func() { + want := map[string]string{annotationKey: annotationUpdatedValue} + for _, c := range allMemberClusters { + Expect(validateAnnotationOfConfigMapOnCluster(c, want)).Should(Succeed(), "Failed to override the annotation of work configmap on %s", c.ClusterName) + } + }) + + It("can delete the RP", func() { + // Delete the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespaceName, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s", rpName) + }) + + It("should remove placed resources from member clusters excluding the first one", func() { + checkIfRemovedConfigMapFromMemberClusters(allMemberClusters[1:]) + }) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s", rpName) + }) + + It("configmap should be kept on member cluster", func() { + checkConfigMapExistsWithOwnerRefOnMemberCluster(workNamespaceName, configMapName, rpName) + }) + }) + + Context("no dual placement", Ordered, func() { + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + conflictedRPName := "rp-conflicted" + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + configMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespaceName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + ApplyStrategy: &placementv1beta1.ApplyStrategy{ + AllowCoOwnership: true, + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed()) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on member clusters", func() { + workResourcesPlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster1EastProdName) + }) + + It("can create a conflicted RP", func() { + conflictedRP := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: conflictedRPName, + Namespace: workNamespaceName, + // No need for the custom deletion blocker finalizer. + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + ApplyStrategy: &placementv1beta1.ApplyStrategy{ + AllowCoOwnership: true, + }, + }, + }, + } + Expect(hubClient.Create(ctx, conflictedRP)).To(Succeed()) + }) + + It("should update conflicted RP status as expected", func() { + buildWantRPStatus := func(rpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ + Conditions: rpAppliedFailedConditions(rpGeneration), + SelectedResources: appConfigMapIdentifiers(), + PerClusterPlacementStatuses: []placementv1beta1.PerClusterPlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: perClusterApplyFailedConditions(rpGeneration), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Version: "v1", + Kind: "ConfigMap", + Name: configMapName, + Namespace: workNamespaceName, + }, + Condition: metav1.Condition{ + Type: string(placementv1beta1.PerClusterAppliedConditionType), + Status: metav1.ConditionFalse, + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), + }, + }, + }, + }, + }, + ObservedResourceIndex: "0", + } + } + + Eventually(func() error { + conflictedRP := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: conflictedRPName, Namespace: workNamespaceName}, conflictedRP); err != nil { + return err + } + wantRPStatus := buildWantRPStatus(conflictedRP.Generation) + + if diff := cmp.Diff(conflictedRP.Status, 
*wantRPStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("RP status diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should have no effect on previously created RP", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName}, nil, "0") + Consistently(rpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should not add additional owner reference to affected resources", func() { + expectedOwnerRef := buildOwnerReference(memberCluster1EastProd, fmt.Sprintf("%s.%s", workNamespaceName, rpName)) + + cm := &corev1.ConfigMap{} + Expect(memberCluster1EastProdClient.Get(ctx, client.ObjectKey{Name: configMapName, Namespace: workNamespaceName}, cm)).To(Succeed()) + + // The difference has been overwritten. + wantCM := appConfigMap() + wantCM.OwnerReferences = []metav1.OwnerReference{*expectedOwnerRef} + + // No need to use an Eventually block as this spec runs after the RP status has been verified. + diff := cmp.Diff( + cm, &wantCM, + ignoreObjectMetaAutoGenExceptOwnerRefFields, + ignoreObjectMetaAnnotationField, + ) + Expect(diff).To(BeEmpty(), "ConfigMap diff (-got +want):\n%s", diff) + }) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, allMemberClusters) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: conflictedRPName, Namespace: workNamespaceName}, allMemberClusters) + }) + }) + }) + + Describe("SSA", Ordered, func() { + Context("use server-side apply to place resources (with changes)", func() { + // The key here should match the one used in the default config map. + cmDataKey := "data" + cmDataVal1 := "foobar" + + BeforeAll(func() { + createConfigMap() + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespaceName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + MaxUnavailable: ptr.To(intstr.FromInt(1)), + MaxSurge: ptr.To(intstr.FromInt(1)), + UnavailablePeriodSeconds: ptr.To(2), + }, + ApplyStrategy: &placementv1beta1.ApplyStrategy{ + Type: placementv1beta1.ApplyStrategyTypeServerSideApply, + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed()) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("can update the manifests", func() { + Eventually(func() error { + cm := &corev1.ConfigMap{} + if err := hubClient.Get(ctx, client.ObjectKey{Name: configMapName, Namespace: workNamespaceName}, cm); err != nil { + return fmt.Errorf("failed to get configmap: %w", err) + } + + if cm.Data == nil { + cm.Data = make(map[string]string) + } + cm.Data[cmDataKey] = cmDataVal1 + + if err := hubClient.Update(ctx, cm); err != nil { + return fmt.Errorf("failed to update configmap: %w", err) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the manifests") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "1") + // Longer timeout is used to allow full rollouts. + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should refresh the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberClient := allMemberClusters[idx].KubeClient + + Eventually(func() error { + cm := &corev1.ConfigMap{} + if err := memberClient.Get(ctx, client.ObjectKey{Name: configMapName, Namespace: workNamespaceName}, cm); err != nil { + return fmt.Errorf("failed to get configmap: %w", err) + } + + // To keep things simple, here the config map for comparison is + // rebuilt from the retrieved data. 
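+					// Only the name, namespace, and data are carried over, so server-populated metadata (resourceVersion, managedFields, etc.) stays out of the comparison.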
+ rebuiltCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cm.Name, + Namespace: cm.Namespace, + }, + Data: cm.Data, + } + wantCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: workNamespaceName, + }, + Data: map[string]string{ + cmDataKey: cmDataVal1, + }, + } + if diff := cmp.Diff(rebuiltCM, wantCM); diff != "" { + return fmt.Errorf("configMap diff (-got, +want):\n%s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to refresh the resources on %s", allMemberClusters[idx].ClusterName) + } + }) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, allMemberClusters) + }) + }) + + Context("fall back to server-side apply when client-side apply cannot be used", func() { + cmDataKey := "randomBase64Str" + + BeforeAll(func() { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: workNamespaceName, + }, + Data: map[string]string{ + cmDataKey: "", + }, + } + Expect(hubClient.Create(ctx, cm)).To(Succeed(), "Failed to create configMap %s in namespace %s", configMapName, workNamespaceName) + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespaceName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + MaxUnavailable: ptr.To(intstr.FromInt(1)), + MaxSurge: ptr.To(intstr.FromInt(1)), + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed()) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName}, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on the selected member cluster", func() { + workResourcesPlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster1EastProdName) + }) + + It("can update the manifests", func() { + // Update the configMap to add a large enough data piece so that + // client-side apply is no longer possible. + Eventually(func() error { + cm := &corev1.ConfigMap{} + if err := hubClient.Get(ctx, client.ObjectKey{Name: configMapName, Namespace: workNamespaceName}, cm); err != nil { + return fmt.Errorf("failed to get configMap: %w", err) + } + + if cm.Data == nil { + cm.Data = make(map[string]string) + } + // Generate a large bytes array. 
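+					// (600000 random bytes base64-encode to roughly 800000 characters, which, per the note below, is too large for the last applied configuration annotation yet still below the ~1 MB object cap.)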
+ // + // Kubernetes will reject configMaps larger than 1048576 bytes (~1 MB); + // and when an object's spec size exceeds 262144 bytes, KubeFleet will not + // be able to use client-side apply with the object as it cannot set + // an last applied configuration annotation of that size. Consequently, + // for this test case, it prepares a configMap object of 600000 bytes so + // that Kubernetes will accept it but CSA cannot use it, forcing the + // work applier to fall back to server-side apply. + randomBytes := make([]byte, 600000) + // Note that this method never returns an error and will always fill the given + // slice completely. + _, _ = rand.Read(randomBytes) + // Encode the random bytes to a base64 string. + randomBase64Str := base64.StdEncoding.EncodeToString(randomBytes) + cm.Data[cmDataKey] = randomBase64Str + + if err := hubClient.Update(ctx, cm); err != nil { + return fmt.Errorf("failed to update configMap: %w", err) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the manifests") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName}, nil, "1") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on the selected member cluster", func() { + workResourcesPlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster1EastProdName) + }) + + It("should fall back to server-side apply", func() { + Eventually(func() error { + cm := &corev1.ConfigMap{} + if err := memberCluster1EastProdClient.Get(ctx, client.ObjectKey{Name: configMapName, Namespace: workNamespaceName}, cm); err != nil { + return fmt.Errorf("failed to get configMap: %w", err) + } + + lastAppliedConf, foundAnnotation := cm.Annotations[placementv1beta1.LastAppliedConfigAnnotation] + if foundAnnotation && len(lastAppliedConf) > 0 { + return fmt.Errorf("the configMap object has annotation %s (value: %s) in presence when SSA should be used", placementv1beta1.LastAppliedConfigAnnotation, lastAppliedConf) + } + + foundFieldMgr := false + fieldMgrs := cm.GetManagedFields() + for _, fieldMgr := range fieldMgrs { + // For simplicity reasons, here the test case verifies only against the field + // manager name. + if fieldMgr.Manager == "work-api-agent" { + foundFieldMgr = true + } + } + if !foundFieldMgr { + return fmt.Errorf("the configMap object does not list the KubeFleet member agent as a field manager when SSA should be used") + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to fall back to server-side apply") + }) + + It("can update the manifests", func() { + // Update the configMap to remove the large data piece so that + // client-side apply can be used again. 
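+				// Shrinking the object back below the annotation size limit should allow the work applier to switch back to client-side apply; the specs that follow verify this.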
+ Eventually(func() error { + cm := &corev1.ConfigMap{} + if err := hubClient.Get(ctx, client.ObjectKey{Name: configMapName, Namespace: workNamespaceName}, cm); err != nil { + return fmt.Errorf("failed to get configMap: %w", err) + } + + if cm.Data == nil { + cm.Data = make(map[string]string) + } + cm.Data[cmDataKey] = "" + + if err := hubClient.Update(ctx, cm); err != nil { + return fmt.Errorf("failed to update configMap: %w", err) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the manifests") + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), []string{memberCluster1EastProdName}, nil, "2") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on the selected member cluster", func() { + workResourcesPlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1EastProd) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster1EastProdName) + }) + + It("should use client-side apply", func() { + Eventually(func() error { + cm := &corev1.ConfigMap{} + if err := memberCluster1EastProdClient.Get(ctx, client.ObjectKey{Name: configMapName, Namespace: workNamespaceName}, cm); err != nil { + return fmt.Errorf("failed to get configMap: %w", err) + } + + lastAppliedConf, foundAnnotation := cm.Annotations[placementv1beta1.LastAppliedConfigAnnotation] + if !foundAnnotation || len(lastAppliedConf) == 0 { + return fmt.Errorf("the configMap object does not have annotation %s in presence or its value is empty", placementv1beta1.LastAppliedConfigAnnotation) + } + + // Field manager might still be set; this is an expected behavior. + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to fall back to server-side apply") + }) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, allMemberClusters) + }) + }) + + }) + + Describe("switching apply strategies", func() { + Context("switch from client-side apply to report diff", Ordered, func() { + anotherConfigMapName := types.NamespacedName{Name: fmt.Sprintf("another-config-map-%d", GinkgoParallelProcess()), Namespace: workNamespaceName} + selectedResources := []placementv1beta1.ResourceIdentifier{ + { + Kind: "ConfigMap", + Name: configMapName, + Version: "v1", + Namespace: workNamespaceName, + }, + { + Kind: "ConfigMap", + Name: anotherConfigMapName.Name, + Version: "v1", + Namespace: workNamespaceName, + }, + } + + BeforeAll(func() { + // In the clusterResourcePlacement test, it selects two resources, ns and configMap. + // Similarly, configMap maps to ns while anotherConfigMap maps to configMap. + createConfigMap() + createAnotherConfigMap(anotherConfigMapName) + + cm := appConfigMap() + By(fmt.Sprintf("creating configmap %s/%s on member cluster", cm.Namespace, cm.Name)) + Expect(memberCluster1EastProdClient.Create(ctx, &cm)).Should(Succeed(), "Failed to create configmap") + + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespaceName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + Name: configMapName, + }, + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + Name: anotherConfigMapName.Name, + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + memberCluster2EastCanaryName, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + MaxUnavailable: ptr.To(intstr.FromInt(1)), + MaxSurge: ptr.To(intstr.FromInt(1)), + UnavailablePeriodSeconds: ptr.To(2), + }, + ApplyStrategy: &placementv1beta1.ApplyStrategy{ + ComparisonOption: placementv1beta1.ComparisonOptionTypePartialComparison, + WhenToApply: placementv1beta1.WhenToApplyTypeIfNotDrifted, + Type: placementv1beta1.ApplyStrategyTypeClientSideApply, + WhenToTakeOver: placementv1beta1.WhenToTakeOverTypeNever, + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed()) + }) + + It("should update RP status as expected", func() { + buildWantRPStatus := func(rpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ + Conditions: rpAppliedFailedConditions(rpGeneration), + SelectedResources: selectedResources, + PerClusterPlacementStatuses: []placementv1beta1.PerClusterPlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: perClusterApplyFailedConditions(rpGeneration), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Version: "v1", + Kind: "ConfigMap", + Name: configMapName, + Namespace: workNamespaceName, + }, + Condition: metav1.Condition{ + Type: string(placementv1beta1.PerClusterAppliedConditionType), + Status: metav1.ConditionFalse, + Reason: string(workapplier.ApplyOrReportDiffResTypeNotTakenOver), + }, + }, + }, + }, + { + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: perClusterRolloutCompletedConditions(rpGeneration, true, false), + }, + }, + ObservedResourceIndex: "0", + } + } + + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, rp); err != nil { + return err + } + wantRPStatus := buildWantRPStatus(rp.Generation) + + if diff := cmp.Diff(rp.Status, *wantRPStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("RP status diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("can update the manifests", func() { + // updating the second configMap data + Eventually(func() error { + cm := &corev1.ConfigMap{} + if err := hubClient.Get(ctx, anotherConfigMapName, cm); err != nil { + return fmt.Errorf("failed to get configmap: %w", err) + } + + cm.Data = map[string]string{"data": "bar"} + if err := hubClient.Update(ctx, cm); err != nil { + return fmt.Errorf("failed to update configmap: %w", err) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the manifests") + }) + + It("should update RP status as expected", func() { + // The rollout of the previous change will be 
blocked due to the rollout + // strategy configuration (1 member cluster has failed; 0 clusters are + // allowed to become unavailable). + buildWantRPStatus := func(rpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ + Conditions: rpRolloutStuckConditions(rpGeneration), + SelectedResources: selectedResources, + PerClusterPlacementStatuses: []placementv1beta1.PerClusterPlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "1", + Conditions: perClusterApplyFailedConditions(rpGeneration), + FailedPlacements: []placementv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Version: "v1", + Kind: "ConfigMap", + Name: configMapName, + Namespace: workNamespaceName, + }, + Condition: metav1.Condition{ + Type: string(placementv1beta1.PerClusterAppliedConditionType), + Status: metav1.ConditionFalse, + Reason: string(workapplier.ApplyOrReportDiffResTypeNotTakenOver), + }, + }, + }, + }, + { + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "1", + Conditions: perClusterSyncPendingConditions(rpGeneration), + }, + }, + ObservedResourceIndex: "1", + } + } + + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, rp); err != nil { + return err + } + wantRPStatus := buildWantRPStatus(rp.Generation) + + if diff := cmp.Diff(rp.Status, *wantRPStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("RP status diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("can update the apply strategy", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, rp); err != nil { + return fmt.Errorf("failed to get RP: %w", err) + } + + rp.Spec.Strategy.ApplyStrategy = &placementv1beta1.ApplyStrategy{ + Type: placementv1beta1.ApplyStrategyTypeReportDiff, + WhenToTakeOver: placementv1beta1.WhenToTakeOverTypeNever, + ComparisonOption: placementv1beta1.ComparisonOptionTypePartialComparison, + } + if err := hubClient.Update(ctx, rp); err != nil { + return fmt.Errorf("failed to update RP: %w", err) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the apply strategy") + }) + + It("should update RP status as expected", func() { + // The rollout of the previous change will be blocked due to the rollout + // strategy configuration (1 member cluster has failed; 0 clusters are + // allowed to become unavailable). 
+ buildWantRPStatus := func(rpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ + Conditions: rpDiffReportedConditions(rpGeneration, false), + SelectedResources: selectedResources, + PerClusterPlacementStatuses: []placementv1beta1.PerClusterPlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "1", + Conditions: perClusterDiffReportedConditions(rpGeneration), + }, + { + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "1", + Conditions: perClusterDiffReportedConditions(rpGeneration), + DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Version: "v1", + Kind: "ConfigMap", + Name: anotherConfigMapName.Name, + Namespace: workNamespaceName, + }, + TargetClusterObservedGeneration: ptr.To(int64(0)), + ObservedDiffs: []placementv1beta1.PatchDetail{ + { + Path: "/data/data", + ValueInHub: "bar", + ValueInMember: "test", + }, + }, + }, + }, + }, + }, + ObservedResourceIndex: "1", + } + } + + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, rp); err != nil { + return err + } + wantRPStatus := buildWantRPStatus(rp.Generation) + + if diff := cmp.Diff(rp.Status, *wantRPStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("RP status diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + AfterAll(func() { + cleanupConfigMapOnCluster(memberCluster1EastProd) + cleanupAnotherConfigMap(anotherConfigMapName) + + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, allMemberClusters) + }) + }) + + Context("switch from report diff to server side apply", Ordered, func() { + BeforeAll(func() { + createConfigMap() + + // Prepare the pre-existing resources. + cm := appConfigMap() + if cm.Labels == nil { + cm.Labels = make(map[string]string) + } + cm.Labels[unmanagedLabelKey] = unmanagedLabelVal1 + + By(fmt.Sprintf("creating configmap %s/%s on member cluster", cm.Namespace, cm.Name)) + Expect(memberCluster1EastProdClient.Create(ctx, &cm)).Should(Succeed(), "Failed to create configmap") + + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespaceName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + memberCluster2EastCanaryName, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + MaxUnavailable: ptr.To(intstr.FromInt(1)), + MaxSurge: ptr.To(intstr.FromInt(1)), + UnavailablePeriodSeconds: ptr.To(2), + }, + ApplyStrategy: &placementv1beta1.ApplyStrategy{ + ComparisonOption: placementv1beta1.ComparisonOptionTypeFullComparison, + Type: placementv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed()) + }) + + It("should update RP status as expected", func() { + buildWantRPStatus := func(rpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ + Conditions: rpDiffReportedConditions(rpGeneration, false), + SelectedResources: appConfigMapIdentifiers(), + PerClusterPlacementStatuses: []placementv1beta1.PerClusterPlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + Conditions: perClusterDiffReportedConditions(rpGeneration), + DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Version: "v1", + Kind: "ConfigMap", + Name: configMapName, + Namespace: workNamespaceName, + }, + TargetClusterObservedGeneration: ptr.To(int64(0)), + ObservedDiffs: []placementv1beta1.PatchDetail{ + { + Path: fmt.Sprintf("/metadata/labels/%s", unmanagedLabelKey), + ValueInMember: unmanagedLabelVal1, + }, + }, + }, + }, + }, + { + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: perClusterDiffReportedConditions(rpGeneration), + DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Version: "v1", + Kind: "ConfigMap", + Name: configMapName, + Namespace: workNamespaceName, + }, + ObservedDiffs: []placementv1beta1.PatchDetail{ + { + Path: "/", + ValueInHub: "(the whole object)", + }, + }, + }, + }, + }, + }, + ObservedResourceIndex: "0", + } + } + + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, rp); err != nil { + return err + } + wantRPStatus := buildWantRPStatus(rp.Generation) + + if diff := cmp.Diff(rp.Status, *wantRPStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("RP status diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("can update the manifests", func() { + Eventually(func() error { + cm := &corev1.ConfigMap{} + if err := hubClient.Get(ctx, client.ObjectKey{Name: configMapName, Namespace: workNamespaceName}, cm); err != nil { + return fmt.Errorf("failed to get configmap: %w", err) + } + + if cm.Labels == nil { + cm.Labels = make(map[string]string) + } + cm.Labels[unmanagedLabelKey] = unmanagedLabelVal1 + if err := hubClient.Update(ctx, cm); err != nil { + return fmt.Errorf("failed to update configmap: %w", err) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the manifests") + }) 
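+			// After the hub copy of the configMap gains the same label, the first cluster should stop reporting a diff, while the second cluster (where the object has never been applied) still reports the whole object as missing.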
+ + It("should update RP status as expected", func() { + buildWantRPStatus := func(rpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ + Conditions: rpDiffReportedConditions(rpGeneration, false), + SelectedResources: appConfigMapIdentifiers(), + PerClusterPlacementStatuses: []placementv1beta1.PerClusterPlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "1", + Conditions: perClusterDiffReportedConditions(rpGeneration), + }, + { + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "1", + Conditions: perClusterDiffReportedConditions(rpGeneration), + DiffedPlacements: []placementv1beta1.DiffedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Version: "v1", + Kind: "ConfigMap", + Name: configMapName, + Namespace: workNamespaceName, + }, + ObservedDiffs: []placementv1beta1.PatchDetail{ + { + Path: "/", + ValueInHub: "(the whole object)", + }, + }, + }, + }, + }, + }, + ObservedResourceIndex: "1", + } + } + + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, rp); err != nil { + return err + } + wantRPStatus := buildWantRPStatus(rp.Generation) + + if diff := cmp.Diff(rp.Status, *wantRPStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("RP status diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("can update the apply strategy", func() { + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, rp); err != nil { + return fmt.Errorf("failed to get RP: %w", err) + } + + rp.Spec.Strategy.ApplyStrategy = &placementv1beta1.ApplyStrategy{ + Type: placementv1beta1.ApplyStrategyTypeServerSideApply, + WhenToTakeOver: placementv1beta1.WhenToTakeOverTypeAlways, + ComparisonOption: placementv1beta1.ComparisonOptionTypePartialComparison, + ServerSideApplyConfig: &placementv1beta1.ServerSideApplyConfig{ + ForceConflicts: true, + }, + } + if err := hubClient.Update(ctx, rp); err != nil { + return fmt.Errorf("failed to update RP: %w", err) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the apply strategy") + }) + + It("should update RP status as expected", func() { + buildWantRPStatus := func(rpGeneration int64) *placementv1beta1.PlacementStatus { + return &placementv1beta1.PlacementStatus{ + Conditions: rpRolloutCompletedConditions(rpGeneration, false), + SelectedResources: appConfigMapIdentifiers(), + PerClusterPlacementStatuses: []placementv1beta1.PerClusterPlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "1", + Conditions: perClusterRolloutCompletedConditions(rpGeneration, true, false), + }, + { + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "1", + Conditions: perClusterRolloutCompletedConditions(rpGeneration, true, false), + }, + }, + ObservedResourceIndex: "1", + } + } + + Eventually(func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, rp); err != nil { + return err + } + wantRPStatus := buildWantRPStatus(rp.Generation) + + if diff := cmp.Diff(rp.Status, *wantRPStatus, placementStatusCmpOptions...); 
diff != "" { + return fmt.Errorf("RP status diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespaceName}, allMemberClusters) + }) + }) + }) +}) + +func createAnotherConfigMap(name types.NamespacedName) { + configMap := buildAppConfigMap(name) + Expect(hubClient.Create(ctx, &configMap)).To(Succeed(), "Failed to create config map %s", configMap.Name) +} + +func cleanupAnotherConfigMap(name types.NamespacedName) { + cm := &corev1.ConfigMap{} + err := hubClient.Get(ctx, name, cm) + if err != nil && apierrors.IsNotFound(err) { + return + } + Expect(err).To(Succeed(), "Failed to get config map %s", name) + + Expect(hubClient.Delete(ctx, cm)).To(Succeed(), "Failed to delete config map %s", name) + + Eventually(func() error { + cm := &corev1.ConfigMap{} + err := hubClient.Get(ctx, name, cm) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return err + } + return fmt.Errorf("config map %s still exists", name) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to wait for config map %s to be deleted", name) +} diff --git a/test/e2e/resource_placement_pickall_test.go b/test/e2e/resource_placement_pickall_test.go index 5b3df61e8..16ef3300f 100644 --- a/test/e2e/resource_placement_pickall_test.go +++ b/test/e2e/resource_placement_pickall_test.go @@ -40,27 +40,7 @@ var _ = Describe("placing namespaced scoped resources using a RP with PickAll po createWorkResources() // Create the CRP with Namespace-only selector. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: namespaceOnlySelector(), - Policy: &placementv1beta1.PlacementPolicy{ - PlacementType: placementv1beta1.PickAllPlacementType, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + createNamespaceOnlyCRP(crpName) By("should update CRP status as expected") crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") diff --git a/test/e2e/resource_placement_ro_test.go b/test/e2e/resource_placement_ro_test.go index 32be49c67..f0a6f29bc 100644 --- a/test/e2e/resource_placement_ro_test.go +++ b/test/e2e/resource_placement_ro_test.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/ptr" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" scheduler "github.com/kubefleet-dev/kubefleet/pkg/scheduler/framework" @@ -37,31 +36,11 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) BeforeEach(OncePerOrdered, func() { - By("creating work resources") - createWorkResources() + By("creating namespace") + createNamespace() // Create the CRP with Namespace-only selector. 
- crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: namespaceOnlySelector(), - Policy: &placementv1beta1.PlacementPolicy{ - PlacementType: placementv1beta1.PickAllPlacementType, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + createNamespaceOnlyCRP(crpName) By("should update CRP status as expected") crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") @@ -78,27 +57,10 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) BeforeAll(func() { + createConfigMap() + // Create the RP in the same namespace selecting namespaced resources. - rp := &placementv1beta1.ResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: rpName, - Namespace: workNamespace, - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: configMapSelector(), - Policy: &placementv1beta1.PlacementPolicy{ - PlacementType: placementv1beta1.PickAllPlacementType, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + createRP(workNamespace, rpName) // Create the ro. ro := &placementv1beta1.ResourceOverride{ @@ -264,6 +226,8 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) BeforeAll(func() { + createConfigMap() + ro := &placementv1beta1.ResourceOverride{ ObjectMeta: metav1.ObjectMeta{ Name: roName, @@ -303,26 +267,7 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) // Create the RP. - rp := &placementv1beta1.ResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: rpName, - Namespace: workNamespace, - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: configMapSelector(), - Policy: &placementv1beta1.PlacementPolicy{ - PlacementType: placementv1beta1.PickAllPlacementType, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + createRP(workNamespace, rpName) }) AfterAll(func() { @@ -356,27 +301,10 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) BeforeAll(func() { + createConfigMap() + // Create the RP. 
- rp := &placementv1beta1.ResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: rpName, - Namespace: workNamespace, - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: configMapSelector(), - Policy: &placementv1beta1.PlacementPolicy{ - PlacementType: placementv1beta1.PickAllPlacementType, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + createRP(workNamespace, rpName) // Create the ro. ro := &placementv1beta1.ResourceOverride{ @@ -476,7 +404,7 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv It("should have override annotations on the configmap", func() { for i, cluster := range allMemberClusters { wantAnnotations := map[string]string{roTestAnnotationKey: fmt.Sprintf("%s-%d", roTestAnnotationValue, i)} - Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) + Expect(validateAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) } }) }) @@ -488,6 +416,8 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) BeforeAll(func() { + createConfigMap() + // Create the bad ro. ro := &placementv1beta1.ResourceOverride{ ObjectMeta: metav1.ObjectMeta{ @@ -527,26 +457,7 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) // Create the RP later - rp := &placementv1beta1.ResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: rpName, - Namespace: workNamespace, - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: configMapSelector(), - Policy: &placementv1beta1.PlacementPolicy{ - PlacementType: placementv1beta1.PickAllPlacementType, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + createRP(workNamespace, rpName) }) AfterAll(func() { @@ -575,27 +486,10 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) BeforeAll(func() { + createConfigMap() + // Create the RP. 
- rp := &placementv1beta1.ResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: rpName, - Namespace: workNamespace, - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: configMapSelector(), - Policy: &placementv1beta1.PlacementPolicy{ - PlacementType: placementv1beta1.PickAllPlacementType, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + createRP(workNamespace, rpName) // Create the ro. ro := &placementv1beta1.ResourceOverride{ @@ -658,6 +552,8 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) BeforeAll(func() { + createConfigMap() + // Create the ro before rp so that the observed resource index is predictable. ro := &placementv1beta1.ResourceOverride{ ObjectMeta: metav1.ObjectMeta{ @@ -708,26 +604,7 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) // Create the RP. - rp := &placementv1beta1.ResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: rpName, - Namespace: workNamespace, - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: configMapSelector(), - Policy: &placementv1beta1.PlacementPolicy{ - PlacementType: placementv1beta1.PickAllPlacementType, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + createRP(workNamespace, rpName) }) AfterAll(func() { @@ -773,6 +650,8 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) BeforeAll(func() { + createConfigMap() + // Create the ro before rp so that the observed resource index is predictable. ro := &placementv1beta1.ResourceOverride{ ObjectMeta: metav1.ObjectMeta{ @@ -825,26 +704,7 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) // Create the RP. 
- rp := &placementv1beta1.ResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: rpName, - Namespace: workNamespace, - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: configMapSelector(), - Policy: &placementv1beta1.PlacementPolicy{ - PlacementType: placementv1beta1.PickAllPlacementType, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + createRP(workNamespace, rpName) }) AfterAll(func() { @@ -875,7 +735,7 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv for idx := 0; idx < 2; idx++ { cluster := allMemberClusters[idx] wantAnnotations := map[string]string{roTestAnnotationKey: roTestAnnotationValue} - Expect(validateOverrideAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) + Expect(validateAnnotationOfConfigMapOnCluster(cluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", cluster.ClusterName) } }) @@ -897,6 +757,8 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv workNamespace := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) BeforeAll(func() { + createConfigMap() + // Create the ro before rp so that the observed resource index is predictable. ro := &placementv1beta1.ResourceOverride{ ObjectMeta: metav1.ObjectMeta{ @@ -951,26 +813,7 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv Expect(hubClient.Create(ctx, ro)).To(Succeed(), "Failed to create resourceOverride %s", roName) // Create the RP. - rp := &placementv1beta1.ResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: rpName, - Namespace: workNamespace, - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: configMapSelector(), - Policy: &placementv1beta1.PlacementPolicy{ - PlacementType: placementv1beta1.PickAllPlacementType, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + createRP(workNamespace, rpName) }) AfterAll(func() { @@ -1072,6 +915,8 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv roSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, roName, 0) BeforeAll(func() { + createConfigMap() + // Create the bad ro. 
ro := &placementv1beta1.ResourceOverride{ ObjectMeta: metav1.ObjectMeta{ @@ -1130,26 +975,7 @@ var _ = Describe("placing namespaced scoped resources using a RP with ResourceOv }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update ro as expected", rpName) // Create the RP later so that failed override won't block the rollout - rp := &placementv1beta1.ResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: rpName, - Namespace: workNamespace, - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: configMapSelector(), - Policy: &placementv1beta1.PlacementPolicy{ - PlacementType: placementv1beta1.PickAllPlacementType, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP") + createRP(workNamespace, rpName) }) AfterAll(func() { diff --git a/test/e2e/resources_test.go b/test/e2e/resources_test.go index 8077903cf..d81e273bf 100644 --- a/test/e2e/resources_test.go +++ b/test/e2e/resources_test.go @@ -26,6 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" fleetnetworkingv1alpha1 "go.goms.io/fleet-networking/api/v1alpha1" @@ -126,10 +127,17 @@ func appNamespace() corev1.Namespace { } func appConfigMap() corev1.ConfigMap { + return buildAppConfigMap(types.NamespacedName{ + Name: fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()), + Namespace: fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()), + }) +} + +func buildAppConfigMap(configMap types.NamespacedName) corev1.ConfigMap { return corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()), - Namespace: fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()), + Name: configMap.Name, + Namespace: configMap.Namespace, }, Data: map[string]string{ "data": "test", diff --git a/test/e2e/updaterun_test.go b/test/e2e/updaterun_test.go index a98aadf22..710efdc48 100644 --- a/test/e2e/updaterun_test.go +++ b/test/e2e/updaterun_test.go @@ -819,8 +819,8 @@ var _ = Describe("test CRP rollout with staged update run", func() { wantCROAnnotations := map[string]string{croTestAnnotationKey: fmt.Sprintf("%s-%d", croTestAnnotationValue, 0)} wantROAnnotations := map[string]string{roTestAnnotationKey: fmt.Sprintf("%s-%d", roTestAnnotationValue, 1)} Expect(validateAnnotationOfWorkNamespaceOnCluster(allMemberClusters[0], wantCROAnnotations)).Should(Succeed(), "Failed to override the annotation of work namespace on %s", allMemberClusters[0].ClusterName) - Expect(validateOverrideAnnotationOfConfigMapOnCluster(allMemberClusters[0], wantCROAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", allMemberClusters[0].ClusterName) - Expect(validateOverrideAnnotationOfConfigMapOnCluster(allMemberClusters[1], wantROAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", allMemberClusters[1].ClusterName) + Expect(validateAnnotationOfConfigMapOnCluster(allMemberClusters[0], wantCROAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", allMemberClusters[0].ClusterName) + 
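// validateAnnotationOfConfigMapOnCluster (the renamed form of
// validateOverrideAnnotationOfConfigMapOnCluster) is defined elsewhere in the
// e2e suite and is not part of this patch. For reference, a minimal sketch of
// the shape such a helper typically takes is shown below; the body is an
// illustrative assumption only.
func validateAnnotationOfConfigMapOnClusterSketch(cluster *framework.Cluster, wantAnnotations map[string]string) error {
	cmName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess())
	nsName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess())
	cm := &corev1.ConfigMap{}
	if err := cluster.KubeClient.Get(ctx, types.NamespacedName{Namespace: nsName, Name: cmName}, cm); err != nil {
		return fmt.Errorf("failed to get config map %s/%s: %w", nsName, cmName, err)
	}
	// Verify that every expected (overridden) annotation is present with the wanted value.
	for k, want := range wantAnnotations {
		if got := cm.Annotations[k]; got != want {
			return fmt.Errorf("config map annotation %s: got %q, want %q", k, got, want)
		}
	}
	return nil
}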
Expect(validateAnnotationOfConfigMapOnCluster(allMemberClusters[1], wantROAnnotations)).Should(Succeed(), "Failed to override the annotation of configmap on %s", allMemberClusters[1].ClusterName) }) }) diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 44d3b2871..97a1e18f7 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -700,9 +700,16 @@ func createWorkResource(name, namespace string) { // createWorkResources creates some resources on the hub cluster for testing purposes. func createWorkResources() { + createNamespace() + createConfigMap() +} + +func createNamespace() { ns := appNamespace() Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) +} +func createConfigMap() { configMap := appConfigMap() Expect(hubClient.Create(ctx, &configMap)).To(Succeed(), "Failed to create config map %s", configMap.Name) } @@ -720,6 +727,19 @@ func cleanWorkResourcesOnCluster(cluster *framework.Cluster) { Eventually(workResourcesRemovedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove work resources from %s cluster", cluster.ClusterName) } +// cleanupConfigMap deletes the ConfigMap created by createWorkResources and waits until the resource is not found. +func cleanupConfigMap() { + cleanupConfigMapOnCluster(hubCluster) +} + +func cleanupConfigMapOnCluster(cluster *framework.Cluster) { + configMap := appConfigMap() + Expect(client.IgnoreNotFound(cluster.KubeClient.Delete(ctx, &configMap))).To(Succeed(), "Failed to delete config map %s", configMap.Name) + + configMapRemovedActual := namespacedResourcesRemovedFromClusterActual(cluster) + Eventually(configMapRemovedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove config map from %s cluster", cluster.ClusterName) +} + // setMemberClusterToLeave sets a specific member cluster to leave the fleet. func setMemberClusterToLeave(memberCluster *framework.Cluster) { mcObj := &clusterv1beta1.MemberCluster{ @@ -757,6 +777,41 @@ func createAnotherValidOwnerReference(nsName string) metav1.OwnerReference { } } +func createAnotherValidOwnerReferenceForConfigMap(namespace, configMapName string) metav1.OwnerReference { + // Create a configmap to be owner. + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: namespace, + }, + Data: map[string]string{ + "key": "value", + }, + } + Expect(allMemberClusters[0].KubeClient.Create(ctx, cm)).Should(Succeed(), "Failed to create configmap %s/%s", namespace, configMapName) + + // Get the configmap to ensure to create a valid owner reference. + Expect(allMemberClusters[0].KubeClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: namespace}, cm)).Should(Succeed(), "Failed to get configmap %s/%s", namespace, configMapName) + + return metav1.OwnerReference{ + APIVersion: "v1", + Kind: "ConfigMap", + Name: configMapName, + UID: cm.UID, + } +} + +func cleanupAnotherValidOwnerReferenceForConfigMap(namespace, configMapName string) { + // Cleanup the configmap created for the owner reference. + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: namespace, + }, + } + Expect(allMemberClusters[0].KubeClient.Delete(ctx, cm)).Should(Succeed(), "Failed to delete configmap %s/%s", namespace, configMapName) +} + func cleanupAnotherValidOwnerReference(nsName string) { // Cleanup the namespace created for the owner reference. 
ns := &corev1.Namespace{ @@ -863,6 +918,28 @@ func checkNamespaceExistsWithOwnerRefOnMemberCluster(nsName, crpName string) { }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Namespace which is not owned by the CRP should not be deleted") } +func checkConfigMapExistsWithOwnerRefOnMemberCluster(namespace, cmName, rpName string) { + Consistently(func() error { + cm := &corev1.ConfigMap{} + if err := allMemberClusters[0].KubeClient.Get(ctx, types.NamespacedName{Name: cmName, Namespace: namespace}, cm); err != nil { + return fmt.Errorf("failed to get configmap %s/%s: %w", namespace, cmName, err) + } + + if len(cm.OwnerReferences) > 0 { + for _, ownerRef := range cm.OwnerReferences { + if ownerRef.APIVersion == placementv1beta1.GroupVersion.String() && + ownerRef.Kind == placementv1beta1.AppliedWorkKind && + ownerRef.Name == fmt.Sprintf("%s.%s-work", namespace, rpName) { + if *ownerRef.BlockOwnerDeletion { + return fmt.Errorf("configmap %s/%s owner reference for AppliedWork should have been updated to have BlockOwnerDeletion set to false", namespace, cmName) + } + } + } + } + return nil + }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "ConfigMap which is not owned by the RP should not be deleted") +} + // cleanupPlacement deletes the placement and waits until the resources are not found. func cleanupPlacement(placementKey types.NamespacedName) { // TODO(Arvindthiru): There is a conflict which requires the Eventually block, not sure of series of operations that leads to it yet. @@ -1283,7 +1360,7 @@ func checkIfOverrideAnnotationsOnAllMemberClusters(includeNamespace bool, wantAn if includeNamespace { Expect(validateAnnotationOfWorkNamespaceOnCluster(memberCluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of work namespace on %s", memberCluster.ClusterName) } - Expect(validateOverrideAnnotationOfConfigMapOnCluster(memberCluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of config map on %s", memberCluster.ClusterName) + Expect(validateAnnotationOfConfigMapOnCluster(memberCluster, wantAnnotations)).Should(Succeed(), "Failed to override the annotation of config map on %s", memberCluster.ClusterName) } } @@ -1389,17 +1466,18 @@ func buildOwnerReference(cluster *framework.Cluster, crpName string) *metav1.Own } } -// createCRPWithApplyStrategy creates a ClusterResourcePlacement with the given name and apply strategy. -func createCRPWithApplyStrategy(crpName string, applyStrategy *placementv1beta1.ApplyStrategy) { - crp := &placementv1beta1.ClusterResourcePlacement{ +// createRPWithApplyStrategy creates a ResourcePlacement with the given name and apply strategy. +func createRPWithApplyStrategy(rpNamespace, rpName string, applyStrategy *placementv1beta1.ApplyStrategy) { + rp := &placementv1beta1.ResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ - Name: crpName, + Name: rpName, + Namespace: rpNamespace, // Add a custom finalizer; this would allow us to better observe // the behavior of the controllers. Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: workResourceSelector(), + ResourceSelectors: configMapSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -1409,19 +1487,17 @@ func createCRPWithApplyStrategy(crpName string, applyStrategy *placementv1beta1. 
}, } if applyStrategy != nil { - crp.Spec.Strategy.ApplyStrategy = applyStrategy + rp.Spec.Strategy.ApplyStrategy = applyStrategy } - By(fmt.Sprintf("creating placement %s", crpName)) - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) + By(fmt.Sprintf("creating placement %s", rpName)) + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s", rpName) } -// createCRP creates a ClusterResourcePlacement with the given name. -func createCRP(crpName string) { - createCRPWithApplyStrategy(crpName, nil) -} - -// createNamespaceOnlyCRP creates a ClusterResourcePlacement with namespace-only selector. -func createNamespaceOnlyCRP(crpName string) { +// createCRPWithApplyStrategy creates a ClusterResourcePlacement with the given name and apply strategy. +func createCRPWithApplyStrategy(crpName string, applyStrategy *placementv1beta1.ApplyStrategy, resourceSelectors []placementv1beta1.ResourceSelectorTerm) { + if resourceSelectors == nil { + resourceSelectors = workResourceSelector() + } crp := &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, @@ -1430,7 +1506,7 @@ func createNamespaceOnlyCRP(crpName string) { Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: namespaceOnlySelector(), + ResourceSelectors: resourceSelectors, Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -1439,8 +1515,26 @@ func createNamespaceOnlyCRP(crpName string) { }, }, } - By(fmt.Sprintf("creating namespace-only placement %s", crpName)) - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create namespace-only CRP %s", crpName) + if applyStrategy != nil { + crp.Spec.Strategy.ApplyStrategy = applyStrategy + } + By(fmt.Sprintf("creating placement %s", crpName)) + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) +} + +// createRP creates a ResourcePlacement with the given name. +func createRP(rpNamespace, rpName string) { + createRPWithApplyStrategy(rpNamespace, rpName, nil) +} + +// createCRP creates a ClusterResourcePlacement with the given name. +func createCRP(crpName string) { + createCRPWithApplyStrategy(crpName, nil, nil) +} + +// createNamespaceOnlyCRP creates a ClusterResourcePlacement with namespace-only selector. +func createNamespaceOnlyCRP(crpName string) { + createCRPWithApplyStrategy(crpName, nil, namespaceOnlySelector()) } // ensureUpdateRunDeletion deletes the update run with the given name and checks all related approval requests are also deleted. @@ -1495,6 +1589,9 @@ func ensureRPAndRelatedResourcesDeleted(rpKey types.NamespacedName, memberCluste // Remove the custom deletion blocker finalizer from the ResourcePlacement. cleanupPlacement(rpKey) + + // Delete the created resources. 
+ cleanupConfigMap() } func retrievePlacement(placementKey types.NamespacedName) (placementv1beta1.PlacementObj, error) { From afed79a3570d898fe3f4701e4913d98f34e4444b Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Fri, 29 Aug 2025 05:41:49 +0800 Subject: [PATCH 5/9] test: add taint and toleration tests for RP (#228) Signed-off-by: michaelawyu --- ...esource_placement_taint_toleration_test.go | 392 ++++++++++++++++++ test/e2e/utils_test.go | 18 + 2 files changed, 410 insertions(+) create mode 100644 test/e2e/resource_placement_taint_toleration_test.go diff --git a/test/e2e/resource_placement_taint_toleration_test.go b/test/e2e/resource_placement_taint_toleration_test.go new file mode 100644 index 000000000..c2bd145da --- /dev/null +++ b/test/e2e/resource_placement_taint_toleration_test.go @@ -0,0 +1,392 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package e2e + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/test/e2e/framework" +) + +var _ = Describe("validating RP for taint and toleration features", Label("resourceplacement"), func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + rpKey := types.NamespacedName{Name: rpName, Namespace: appNamespace().Name} + + BeforeEach(OncePerOrdered, func() { + // Create the resources. + createNamespace() + + // Create the CRP with Namespace-only selector. + createNamespaceOnlyCRP(crpName) + + By("should update CRP status as expected") + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + + }) + + AfterEach(OncePerOrdered, func() { + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Describe("placing resource using a resource placement with pickFixed placement policy specified, taint clusters, pick all specified clusters", Serial, Ordered, func() { + BeforeAll(func() { + // Create the resources. + createConfigMap() + // Add taint to all member clusters. + addTaintsToMemberClusters(allMemberClusterNames, buildTaints(allMemberClusterNames)) + + // Create the RP with pickFixed policy specifying all clusters. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
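// buildTaints, buildTolerations, addTaintsToMemberClusters, and
// removeTaintsFromMemberClusters are pre-existing helpers shared with the CRP
// taint/toleration tests and are not defined in this patch. For reference, a
// minimal sketch of what the two builders might look like follows; the keys,
// values, and imports (clusterv1beta1, corev1) are assumptions for
// illustration only.
func buildTaintsSketch(memberClusterNames []string) []clusterv1beta1.Taint {
	taints := make([]clusterv1beta1.Taint, 0, len(memberClusterNames))
	for range memberClusterNames {
		// One NoSchedule taint per targeted member cluster.
		taints = append(taints, clusterv1beta1.Taint{
			Key:    "test-key",
			Value:  "test-value",
			Effect: corev1.TaintEffectNoSchedule,
		})
	}
	return taints
}

func buildTolerationsSketch(memberClusterNames []string) []placementv1beta1.Toleration {
	tolerations := make([]placementv1beta1.Toleration, 0, len(memberClusterNames))
	for range memberClusterNames {
		// A toleration that matches the taint sketched above.
		tolerations = append(tolerations, placementv1beta1.Toleration{
			Key:      "test-key",
			Operator: corev1.TolerationOpEqual,
			Value:    "test-value",
			Effect:   corev1.TaintEffectNoSchedule,
		})
	}
	return tolerations
}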
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: allMemberClusterNames, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create resource placement") + }) + + It("should update resource placement status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update resource placement status as expected") + }) + + It("should place resources on specified clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + AfterAll(func() { + // Remove taint from all member clusters. + removeTaintsFromMemberClusters(allMemberClusterNames) + ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters) + }) + }) + + Describe("placing resources using a resource placement with no placement policy specified, taint clusters, update resource placement with tolerations", Serial, Ordered, func() { + var taintClusterNames, noTaintClusterNames []string + var taintClusters, noTaintClusters []*framework.Cluster + + BeforeAll(func() { + taintClusterNames = []string{memberCluster1EastProdName, memberCluster2EastCanaryName} + taintClusters = []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + + noTaintClusterNames = []string{memberCluster3WestProdName} + noTaintClusters = []*framework.Cluster{memberCluster3WestProd} + + // Create the resources. + createConfigMap() + // Add taint to member clusters 1, 2. + addTaintsToMemberClusters(taintClusterNames, buildTaints(taintClusterNames)) + + // Create the RP with no placement policy. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create resource placement") + }) + + It("should update resource placement status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), noTaintClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update resource placement status as expected") + }) + + It("should ensure no configMap exist on member clusters with taint", func() { + checkIfRemovedConfigMapFromMemberClusters(taintClusters) + }) + + It("should place resources on the selected cluster without taint", func() { + for _, cluster := range noTaintClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on selected cluster") + } + }) + + It("should update resource placement spec with tolerations for tainted cluster", func() { + // update RP with toleration for member cluster 1,2. + updateRPWithTolerations(rpKey, buildTolerations(taintClusterNames)) + }) + + It("should update resource placement status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update resource placement status as expected") + }) + + It("should place resources on the all available member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + AfterAll(func() { + // Remove taint from member cluster 1,2. + removeTaintsFromMemberClusters(taintClusterNames) + ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters) + }) + }) + + Describe("placing resources using a resource placement with no placement policy specified, taint clusters, remove taints from cluster, all cluster should be picked", Serial, Ordered, func() { + var taintClusterNames, noTaintClusterNames []string + var taintClusters, noTaintClusters []*framework.Cluster + + BeforeAll(func() { + taintClusterNames = []string{memberCluster1EastProdName, memberCluster2EastCanaryName} + taintClusters = []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + + noTaintClusterNames = []string{memberCluster3WestProdName} + noTaintClusters = []*framework.Cluster{memberCluster3WestProd} + + // Create the resources. + createConfigMap() + // Add taint to member clusters 1, 2. + addTaintsToMemberClusters(taintClusterNames, buildTaints(taintClusterNames)) + + // Create the RP with no placement policy. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create resource placement") + }) + + It("should update resource placement status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), noTaintClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update resource placement status as expected") + }) + + It("should ensure no configMap exist on member clusters with taint", func() { + checkIfRemovedConfigMapFromMemberClusters(taintClusters) + }) + + It("should place resources on the selected cluster without taint", func() { + for _, cluster := range noTaintClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on selected cluster") + } + }) + + It("should remove taints from member clusters", func() { + // Remove taint from member cluster 1,2. + removeTaintsFromMemberClusters(taintClusterNames) + }) + + It("should update resource placement status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update resource placement status as expected") + }) + + It("should place resources on the all available member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters) + }) + }) + + Describe("picking N clusters with affinities and topology spread constraints, taint clusters, create resource placement with toleration for one cluster", Serial, Ordered, func() { + var taintClusterNames, tolerateClusterNames, unSelectedClusterNames []string + var tolerateClusters, unSelectedClusters []*framework.Cluster + + BeforeAll(func() { + taintClusterNames = []string{memberCluster1EastProdName, memberCluster2EastCanaryName} + + tolerateClusterNames = []string{memberCluster1EastProdName} + tolerateClusters = []*framework.Cluster{memberCluster1EastProd} + + unSelectedClusterNames = []string{memberCluster2EastCanaryName} + unSelectedClusters = []*framework.Cluster{memberCluster2EastCanary} + + // Create the resources. + createConfigMap() + // Add taint to member cluster 1, 2. + addTaintsToMemberClusters(taintClusterNames, buildTaints(taintClusterNames)) + + // Create the RP, with toleration for member cluster 1. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
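// Note on the two scenarios above: a ResourcePlacement created without an
// explicit placement policy is scheduled with PickAll semantics, so the
// tainted member clusters 1 and 2 are skipped at first and only the untainted
// cluster receives the config map. The placement then converges on all member
// clusters once either a matching toleration is added to the spec (first
// scenario) or the taints are removed (second scenario).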
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + // Note that due to limitations in the E2E environment, specifically the limited + // number of clusters available, the affinity and topology spread constraints + // specified here are validated only on a very superficial level, i.e., the flow + // functions. For further evaluations, specifically the correctness check + // of the affinity and topology spread constraint logic, see the scheduler + // integration tests. + Affinity: &placementv1beta1.Affinity{ + ClusterAffinity: &placementv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{ + ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + regionLabelName: regionEast, + }, + }, + }, + }, + }, + }, + }, + TopologySpreadConstraints: []placementv1beta1.TopologySpreadConstraint{ + { + MaxSkew: ptr.To(int32(1)), + TopologyKey: envLabelName, + WhenUnsatisfiable: placementv1beta1.DoNotSchedule, + }, + }, + Tolerations: buildTolerations(tolerateClusterNames), + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create resource placement") + }) + + It("should update resource placement status as expected", func() { + // we choose two clusters using a label. + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), tolerateClusterNames, unSelectedClusterNames, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update resource placement status as expected") + }) + + It("should place resources on the selected clusters with tolerated taint", func() { + for _, cluster := range tolerateClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the selected clusters") + } + }) + + It("should ensure no configMap exist on member clusters with untolerated taint", func() { + checkIfRemovedConfigMapFromMemberClusters(unSelectedClusters) + }) + + AfterAll(func() { + // Remove taint from member cluster 1, 2. + removeTaintsFromMemberClusters(taintClusterNames) + ensureRPAndRelatedResourcesDeleted(rpKey, tolerateClusters) + }) + }) + + Describe("picking all clusters using pickAll placement policy, add taint to a cluster that's already selected", Serial, Ordered, func() { + taintClusterNames := []string{memberCluster1EastProdName} + + BeforeAll(func() { + // Create the resources. + createConfigMap() + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
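// Note on the pickN scenario above: the region affinity narrows the candidate
// set to the two east clusters, both of which are tainted, and only member
// cluster 1's taint is tolerated. Even with NumberOfClusters set to 2, the
// expected outcome is therefore one selected cluster (member cluster 1) and
// one unselected cluster (member cluster 2), which is exactly what
// rpStatusUpdatedActual is asked to verify.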
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickAllPlacementType, + }, + ResourceSelectors: configMapSelector(), + }, + } + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create resource placement") + }) + + It("should update resource placement status as expected", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update resource placement status as expected") + }) + + It("should place resources on the selected clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should add taint to a cluster that's already selected", func() { + // Add taint to member cluster 1. + addTaintsToMemberClusters(taintClusterNames, buildTaints(taintClusterNames)) + }) + + It("should still update resource placement status as expected, no status updates", func() { + statusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update resource placement status as expected") + }) + + It("should still place resources on the selected clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + AfterAll(func() { + // Remove taint from member cluster 1. + removeTaintsFromMemberClusters(taintClusterNames) + ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters) + }) + }) +}) diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 97a1e18f7..7986e70bd 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -1323,6 +1323,24 @@ func updateCRPWithTolerations(tolerations []placementv1beta1.Toleration) { }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement with tolerations %s", crpName) } +func updateRPWithTolerations(rpKey types.NamespacedName, tolerations []placementv1beta1.Toleration) { + Eventually(func() error { + var rp placementv1beta1.ResourcePlacement + err := hubClient.Get(ctx, rpKey, &rp) + if err != nil { + return err + } + if rp.Spec.Policy == nil { + rp.Spec.Policy = &placementv1beta1.PlacementPolicy{ + Tolerations: tolerations, + } + } else { + rp.Spec.Policy.Tolerations = tolerations + } + return hubClient.Update(ctx, &rp) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update resource placement with tolerations %s", rpKey) +} + func cleanupClusterResourceOverride(name string) { cro := &placementv1beta1.ClusterResourceOverride{ ObjectMeta: metav1.ObjectMeta{ From 9239b274623241c64d2ce357cbfb4a619bd4c56a Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Fri, 29 Aug 2025 08:59:47 +0800 Subject: [PATCH 6/9] fix: validate if the resource should be placed (#229) Signed-off-by: Zhiying Lin Signed-off-by: michaelawyu --- .../placement/resource_selector.go | 57 +++-- .../placement/resource_selector_test.go | 212 +++++++++++++++++- test/utils/informer/manager.go | 6 + 3 files changed, 253 insertions(+), 22 deletions(-) diff --git a/pkg/controllers/placement/resource_selector.go b/pkg/controllers/placement/resource_selector.go index 7feb7a516..057acaa10 100644 --- a/pkg/controllers/placement/resource_selector.go +++ b/pkg/controllers/placement/resource_selector.go @@ -284,13 +284,15 @@ func (r 
*Reconciler) fetchResources(selector fleetv1beta1.ResourceSelectorTerm, klog.ErrorS(err, "Cannot get the resource", "gvr", gvr, "name", selector.Name, "namespace", placementKey.Namespace) return nil, controller.NewAPIServerError(true, client.IgnoreNotFound(err)) } - if uObj := obj.DeepCopyObject().(*unstructured.Unstructured); uObj.GetDeletionTimestamp() != nil { - // skip a to be deleted resource - klog.V(2).InfoS("Skip the deleting resource by the selector", - "selector", selector, "placement", placementKey, "resourceName", uObj.GetName()) - return []runtime.Object{}, nil + + shouldInclude, err := r.shouldPropagateObj(placementKey.Namespace, placementKey.Name, obj) + if err != nil { + return nil, err + } + if shouldInclude { + return []runtime.Object{obj}, nil } - return []runtime.Object{obj}, nil + return []runtime.Object{}, nil } var labelSelector labels.Selector @@ -319,18 +321,39 @@ func (r *Reconciler) fetchResources(selector fleetv1beta1.ResourceSelectorTerm, // go ahead and claim all objects by adding a finalizer and insert the placement in its annotation for i := 0; i < len(objects); i++ { - if uObj := objects[i].DeepCopyObject().(*unstructured.Unstructured); uObj.GetDeletionTimestamp() != nil { - // skip a to be deleted resource - klog.V(2).InfoS("Skip the deleting resource by the selector", - "selector", selector, "placement", placementKey, "resourceName", uObj.GetName()) - continue + shouldInclude, err := r.shouldPropagateObj(placementKey.Namespace, placementKey.Name, objects[i]) + if err != nil { + return nil, err + } + if shouldInclude { + selectedObjs = append(selectedObjs, objects[i]) } - selectedObjs = append(selectedObjs, objects[i]) } return selectedObjs, nil } +func (r *Reconciler) shouldPropagateObj(namespace, placementName string, obj runtime.Object) (bool, error) { + uObj := obj.DeepCopyObject().(*unstructured.Unstructured) + uObjKObj := klog.KObj(uObj) + if uObj.GetDeletionTimestamp() != nil { + // skip a to be deleted resource + klog.V(2).InfoS("Skip the deleting resource by the selector", "namespace", namespace, "placement", placementName, "object", uObjKObj) + return false, nil + } + + shouldInclude, err := utils.ShouldPropagateObj(r.InformerManager, uObj) + if err != nil { + klog.ErrorS(err, "Cannot determine if we should propagate an object", "namespace", namespace, "placement", placementName, "object", uObjKObj) + return false, err + } + if !shouldInclude { + klog.V(2).InfoS("Skip the resource by the selector which is forbidden", "namespace", namespace, "placement", placementName, "object", uObjKObj) + return false, nil + } + return true, nil +} + // fetchNamespaceResources retrieves all the objects for a ResourceSelectorTerm that is for namespace. 
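// With this change every selection path shares one filter: the single-object
// and label-selector paths in fetchResources above, as well as
// fetchAllResourcesInOneNamespace below, all go through shouldPropagateObj,
// so objects with a deletion timestamp are skipped and utils.ShouldPropagateObj
// is consulted consistently. Previously only the namespace-wide path applied
// the ShouldPropagateObj check, which is the gap this "validate if the
// resource should be placed" fix closes. The new unit tests cover both the
// reserved kube-root-ca.crt ConfigMap being skipped and the error path where
// the Service lookup behind an Endpoints object fails (exercised via a
// failing Service lister in the fake informer manager).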
func (r *Reconciler) fetchNamespaceResources(selector fleetv1beta1.ResourceSelectorTerm, placementName string) ([]runtime.Object, error) { klog.V(2).InfoS("start to fetch the namespace resources by the selector", "selector", selector) @@ -418,16 +441,8 @@ func (r *Reconciler) fetchAllResourcesInOneNamespace(namespaceName string, place return nil, controller.NewAPIServerError(true, err) } for _, obj := range objs { - uObj := obj.DeepCopyObject().(*unstructured.Unstructured) - if uObj.GetDeletionTimestamp() != nil { - // skip a to be deleted resource - klog.V(2).InfoS("skip the deleting resource by the selector", - "placeName", placeName, "namespace", namespaceName, "object", klog.KObj(uObj)) - continue - } - shouldInclude, err := utils.ShouldPropagateObj(r.InformerManager, uObj) + shouldInclude, err := r.shouldPropagateObj(namespaceName, placeName, obj) if err != nil { - klog.ErrorS(err, "cannot determine if we should propagate an object", "object", klog.KObj(uObj)) return nil, err } if shouldInclude { diff --git a/pkg/controllers/placement/resource_selector_test.go b/pkg/controllers/placement/resource_selector_test.go index bc1628f40..3825eccbe 100644 --- a/pkg/controllers/placement/resource_selector_test.go +++ b/pkg/controllers/placement/resource_selector_test.go @@ -841,6 +841,23 @@ func TestGatherSelectedResource(t *testing.T) { } testConfigMap.SetGroupVersionKind(utils.ConfigMapGVK) + // Common test endpoints object used across multiple test cases. + testEndpoints := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Endpoints", + "metadata": map[string]interface{}{ + "name": "test-endpoints", + "namespace": "test-ns", + }, + }, + } + testEndpoints.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Endpoints", + }) + kubeRootCAConfigMap := &unstructured.Unstructured{ // reserved configmap object Object: map[string]interface{}{ "apiVersion": "v1", @@ -864,6 +881,7 @@ func TestGatherSelectedResource(t *testing.T) { "deletionTimestamp": "2025-01-01T00:00:00Z", "labels": map[string]interface{}{ "tier": "api", + "app": "frontend", }, }, }, @@ -1015,6 +1033,20 @@ func TestGatherSelectedResource(t *testing.T) { resourceConfig: utils.NewResourceConfig(true), // make this allow list - nothing is allowed want: nil, }, + { + name: "should skip disabled resources for resource placement", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(true), // make this allow list - nothing is allowed + want: nil, + }, { name: "should return error for cluster-scoped resource", placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, @@ -1180,7 +1212,7 @@ func TestGatherSelectedResource(t *testing.T) { return &testinformer.FakeManager{ IsClusterScopedResource: true, Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.DeploymentGVR: {Objects: []runtime.Object{testFrontendDeployment, testBackendDeployment, testDeployment}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testFrontendDeployment, testBackendDeployment, testDeployment, testDeletingDeployment}}, }, } }(), @@ -1663,6 +1695,179 @@ func TestGatherSelectedResource(t *testing.T) { }(), wantError: controller.ErrUnexpectedBehavior, }, + { + name: "should skip reserved resources for namespaced placement", + 
placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "kube-root-ca.crt", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.ConfigMapGVR: {Objects: []runtime.Object{kubeRootCAConfigMap}}, + }, + } + }(), + want: nil, // should not propagate reserved configmap + }, + { + name: "should skip reserved resources for namespaced placement when selecting all the configMaps", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.ConfigMapGVR: {Objects: []runtime.Object{kubeRootCAConfigMap, testConfigMap}}, + }, + } + }(), + want: []*unstructured.Unstructured{testConfigMap}, // should not propagate reserved configmap + }, + { + name: "should return error when informer cache is not synced for namespaced placement", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + }, + InformerSynced: ptr.To(false), + } + }(), + wantError: controller.ErrExpectedBehavior, + }, + { + name: "should return error when informer cache is not synced for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Name: "test-cluster-role", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.ClusterRoleGVR: {Objects: []runtime.Object{testClusterRole}}, + }, + InformerSynced: ptr.To(false), + } + }(), + wantError: controller.ErrExpectedBehavior, + }, + { + name: "should return error when informer cache is not synced for cluster scoped placement with namespace resources", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + APIResources: map[schema.GroupVersionKind]bool{ + 
utils.NamespaceGVK: true, + }, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR}, + InformerSynced: ptr.To(false), + } + }(), + wantError: controller.ErrExpectedBehavior, + }, + { + name: "should return error when shouldPropagateObj returns error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Endpoints", + Name: "test-endpoints", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + {Group: "", Version: "v1", Resource: "endpoints"}: { + Objects: []runtime.Object{testEndpoints}, + }, + utils.ServiceGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + } + }(), + wantError: controller.ErrUnexpectedBehavior, + }, + { + name: "should return error by selecting all the endpoints when shouldPropagateObj returns error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Endpoints", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + {Group: "", Version: "v1", Resource: "endpoints"}: { + Objects: []runtime.Object{testEndpoints}, + }, + utils.ServiceGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + } + }(), + wantError: controller.ErrUnexpectedBehavior, + }, } for _, tt := range tests { @@ -1711,6 +1916,9 @@ func newFakeRESTMapper() *fakeRESTMapper { {Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: { Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"}, }, + {Group: "", Kind: "Endpoints"}: { + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"}, + }, }, } } @@ -1748,6 +1956,8 @@ func (f *fakeRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.G return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, nil case resource.Group == "" && resource.Resource == "nodes": return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}, nil + case resource.Group == "" && resource.Resource == "endpoints": + return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Endpoints"}, nil } return schema.GroupVersionKind{}, errors.New("kind not found") } diff --git a/test/utils/informer/manager.go b/test/utils/informer/manager.go index 0a201573f..b67c8fc6a 100644 --- a/test/utils/informer/manager.go +++ b/test/utils/informer/manager.go @@ -125,6 +125,9 @@ type FakeManager struct { Listers map[schema.GroupVersionResource]*FakeLister // NamespaceScopedResources is the list of namespace-scoped resources for testing. NamespaceScopedResources []schema.GroupVersionResource + // InformerSynced controls whether IsInformerSynced returns true or false. 
+ // If nil, defaults to true. If set, returns the value for all resources. + InformerSynced *bool } func (m *FakeManager) AddDynamicResources(_ []informer.APIResourceMeta, _ cache.ResourceEventHandler, _ bool) { @@ -134,6 +137,9 @@ func (m *FakeManager) AddStaticResource(_ informer.APIResourceMeta, _ cache.Reso } func (m *FakeManager) IsInformerSynced(_ schema.GroupVersionResource) bool { + if m.InformerSynced != nil { + return *m.InformerSynced + } return true } From b32db86c23e2ebaaa65c10198c95f773f39699fd Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Fri, 29 Aug 2025 15:39:36 +1000 Subject: [PATCH 7/9] test: set up the binding watcher integration tests to wait for cache sync (#231) * Minor fixes Signed-off-by: michaelawyu * Minor fixes Signed-off-by: michaelawyu * Revert the timeout change in #99 Signed-off-by: michaelawyu * Use eventually block Signed-off-by: michaelawyu * Experimental Signed-off-by: michaelawyu * Revert experimental changes Signed-off-by: michaelawyu --------- Signed-off-by: michaelawyu --- pkg/controllers/bindingwatcher/suite_test.go | 9 +++++++++ .../bindingwatcher/watcher_integration_test.go | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/controllers/bindingwatcher/suite_test.go b/pkg/controllers/bindingwatcher/suite_test.go index 592f1cccf..9bb64d759 100644 --- a/pkg/controllers/bindingwatcher/suite_test.go +++ b/pkg/controllers/bindingwatcher/suite_test.go @@ -20,6 +20,7 @@ import ( "flag" "path/filepath" "testing" + "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -122,6 +123,14 @@ var _ = BeforeSuite(func() { err = mgr.Start(ctx) Expect(err).Should(Succeed(), "failed to run manager") }() + + // Note (chenyu1): for the binding watcher integration tests, must wait for the cache to sync + // before moving onto the test stage, otherwise some Update events might not be caught properly. + // + // This is wrapped in an Eventually block as the manager might not have started yet. 
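// With the cache explicitly synced before the specs run, the enlarged
// 20-second eventuallyTimeout that #99 introduced is no longer needed, which
// is why the watcher integration test below reverts it to 10 seconds.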
+ Eventually(func() bool { + return mgr.GetCache().WaitForCacheSync(ctx) + }, time.Second*10, time.Second*2).To(BeTrue(), "failed to wait for cache to sync") }) var _ = AfterSuite(func() { diff --git a/pkg/controllers/bindingwatcher/watcher_integration_test.go b/pkg/controllers/bindingwatcher/watcher_integration_test.go index db169ce42..6fc857597 100644 --- a/pkg/controllers/bindingwatcher/watcher_integration_test.go +++ b/pkg/controllers/bindingwatcher/watcher_integration_test.go @@ -40,7 +40,7 @@ const ( testReason1 = "testReason1" testReason2 = "testReason2" - eventuallyTimeout = time.Second * 20 + eventuallyTimeout = time.Second * 10 consistentlyDuration = time.Second * 10 interval = time.Millisecond * 250 ) From da7297dfbbe3de9760a6883b0bdd3d885cc8c965 Mon Sep 17 00:00:00 2001 From: Zhiying Lin <54013513+zhiying-lin@users.noreply.github.com> Date: Sun, 31 Aug 2025 05:03:25 +0800 Subject: [PATCH 8/9] test: add RP test for join and leave flow (#233) Signed-off-by: michaelawyu --- test/e2e/enveloped_object_placement_test.go | 16 + test/e2e/join_and_leave_test.go | 473 ++++++++++++------ .../resource_placement_apply_strategy_test.go | 10 +- test/e2e/utils_test.go | 15 +- 4 files changed, 337 insertions(+), 177 deletions(-) diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index 587ca2cd7..f4bd6d52a 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -27,6 +27,7 @@ import ( appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -808,6 +809,21 @@ func createWrappedResourcesForEnvelopTest() { Expect(hubClient.Create(ctx, &testClusterResourceEnvelope)).To(Succeed(), "Failed to create ClusterResourceEnvelope") } +func cleanupWrappedResourcesForEnvelopTest() { + By("deleting namespace resources") + cleanupWorkResources() + + By(fmt.Sprintf("deleting envelope %s", testClusterResourceEnvelope.Name)) + Expect(client.IgnoreNotFound(hubClient.Delete(ctx, &testClusterResourceEnvelope))).To(Succeed(), "Failed to delete testClusterResourceEnvelope") + + Eventually(func() error { + if err := hubClient.Get(ctx, types.NamespacedName{Name: testClusterResourceEnvelope.Name}, &placementv1beta1.ClusterResourceEnvelope{}); !errors.IsNotFound(err) { + return fmt.Errorf("testClusterResourceEnvelope still exists or an unexpected error occurred: %w", err) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove testClusterResourceEnvelope from hub cluster") +} + func checkAllResourcesPlacement(memberCluster *framework.Cluster) func() error { workNamespaceName := appNamespace().Name return func() error { diff --git a/test/e2e/join_and_leave_test.go b/test/e2e/join_and_leave_test.go index b1922676c..7cb4fab88 100644 --- a/test/e2e/join_and_leave_test.go +++ b/test/e2e/join_and_leave_test.go @@ -42,206 +42,347 @@ const ( memberAgentName = "member-agent" ) -// Note that this container cannot run in parallel with other containers. 
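// The join-and-leave suite is restructured below: the enveloped test
// resources are now created in an OncePerOrdered BeforeEach and removed in a
// matching AfterEach (via the new cleanupWrappedResourcesForEnvelopTest
// helper above), and the existing CRP-scoped flow moves under a nested
// Ordered, Serial Describe so that an RP-scoped join/leave flow can share the
// same setup.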
-var _ = Describe("Test member cluster join and leave flow", Label("joinleave"), Ordered, Serial, func() { +var _ = Describe("Test member cluster join and leave flow", Label("joinleave"), func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) internalServiceExportName := fmt.Sprintf("internal-service-export-%d", GinkgoParallelProcess()) - var wantSelectedResources []placementv1beta1.ResourceIdentifier - BeforeAll(func() { - // Create the test resources. + rpKey := types.NamespacedName{Name: rpName, Namespace: workNamespaceName} + var wantCRPSelectedResources []placementv1beta1.ResourceIdentifier + var wantRPSelectedResources []placementv1beta1.ResourceIdentifier + + BeforeEach(OncePerOrdered, func() { readEnvelopTestManifests() - wantSelectedResources = []placementv1beta1.ResourceIdentifier{ - { - Kind: "Namespace", - Name: workNamespaceName, - Version: "v1", - }, - { - Kind: "ConfigMap", - Name: testConfigMap.Name, - Version: "v1", - Namespace: workNamespaceName, - }, - { - Group: placementv1beta1.GroupVersion.Group, - Kind: placementv1beta1.ResourceEnvelopeKind, - Version: placementv1beta1.GroupVersion.Version, - Name: testResourceEnvelope.Name, - Namespace: workNamespaceName, - }, - { - Group: placementv1beta1.GroupVersion.Group, - Kind: placementv1beta1.ClusterResourceEnvelopeKind, - Version: placementv1beta1.GroupVersion.Version, - Name: testClusterResourceEnvelope.Name, - }, - } + + By("creating the test resources") + createWrappedResourcesForEnvelopTest() + }) + + AfterEach(OncePerOrdered, func() { + By("deleting the test resources") + cleanupWrappedResourcesForEnvelopTest() }) - Context("Test cluster join and leave flow with CRP not deleted", Label("joinleave"), Ordered, Serial, func() { - It("Create the test resources in the namespace", createWrappedResourcesForEnvelopTest) + // Note that this container cannot run in parallel with other containers. + Describe("Test member cluster join and leave flow for cluster resource placement", Ordered, Serial, func() { - It("Create the CRP that select the name space and place it to all clusters", func() { - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. - Finalizers: []string{customDeletionBlockerFinalizer}, + BeforeAll(func() { + // Create the test resources. 
+ wantCRPSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Kind: "Namespace", + Name: workNamespaceName, + Version: "v1", }, - Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ - { - Group: "", - Kind: "Namespace", - Version: "v1", - Name: workNamespaceName, - }, - { - Group: placementv1beta1.GroupVersion.Group, - Kind: "ClusterResourceEnvelope", - Version: placementv1beta1.GroupVersion.Version, - Name: testClusterResourceEnvelope.Name, - }, - }, - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: ptr.To(2), - }, - }, + { + Kind: "ConfigMap", + Name: testConfigMap.Name, + Version: "v1", + Namespace: workNamespaceName, + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testResourceEnvelope.Name, + Namespace: workNamespaceName, + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ClusterResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testClusterResourceEnvelope.Name, }, - } - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") - }) - - It("should update CRP status as expected", func() { - crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", true) - Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") - }) - - It("should place the resources on all member clusters", func() { - for idx := range allMemberClusters { - memberCluster := allMemberClusters[idx] - workResourcesPlacedActual := checkAllResourcesPlacement(memberCluster) - Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) } }) - It("create a dummy internalServiceExport in the reserved member namespace", func() { - for idx := range allMemberClusterNames { - memberCluster := allMemberClusters[idx] - namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster.ClusterName) - internalServiceExport := &fleetnetworkingv1alpha1.InternalServiceExport{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespaceName, - Name: internalServiceExportName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. 
- Finalizers: []string{customDeletionBlockerFinalizer}, + Context("Test cluster join and leave flow with CRP not deleted", Label("joinleave"), Ordered, Serial, func() { + It("Create the CRP that select the name space and place it to all clusters", func() { + resourceSelectors := []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "Namespace", + Version: "v1", + Name: workNamespaceName, + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: "ClusterResourceEnvelope", + Version: placementv1beta1.GroupVersion.Version, + Name: testClusterResourceEnvelope.Name, }, - Spec: fleetnetworkingv1alpha1.InternalServiceExportSpec{ - ServiceReference: fleetnetworkingv1alpha1.ExportedObjectReference{ - NamespacedName: "test-namespace/test-svc", - ClusterID: memberCluster.ClusterName, - Kind: "Service", - Namespace: "test-namespace", - Name: "test-svc", - ResourceVersion: "0", - UID: "00000000-0000-0000-0000-000000000000", + } + createCRPWithApplyStrategy(crpName, nil, resourceSelectors) + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantCRPSelectedResources, allMemberClusterNames, nil, "0", true) + Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := checkAllResourcesPlacement(memberCluster) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("create a dummy internalServiceExport in the reserved member namespace", func() { + for idx := range allMemberClusterNames { + memberCluster := allMemberClusters[idx] + namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster.ClusterName) + internalServiceExport := &fleetnetworkingv1alpha1.InternalServiceExport{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespaceName, + Name: internalServiceExportName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
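// The dummy InternalServiceExport created here (with a finalizer kept on it)
// exercises the guard on member cluster unjoin: while an exported service is
// still recorded in the reserved member namespace, the MemberCluster DELETE
// below is expected to be denied with the "Please delete serviceExport ...
// before leaving" message, and the unjoin only proceeds once the export is
// deleted and its finalizer removed.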
+ Finalizers: []string{customDeletionBlockerFinalizer}, }, - Ports: []fleetnetworkingv1alpha1.ServicePort{ - { - Protocol: corev1.ProtocolTCP, - Port: 4848, + Spec: fleetnetworkingv1alpha1.InternalServiceExportSpec{ + ServiceReference: fleetnetworkingv1alpha1.ExportedObjectReference{ + NamespacedName: "test-namespace/test-svc", + ClusterID: memberCluster.ClusterName, + Kind: "Service", + Namespace: "test-namespace", + Name: "test-svc", + ResourceVersion: "0", + UID: "00000000-0000-0000-0000-000000000000", + }, + Ports: []fleetnetworkingv1alpha1.ServicePort{ + { + Protocol: corev1.ProtocolTCP, + Port: 4848, + }, }, }, - }, + } + Expect(hubClient.Create(ctx, internalServiceExport)).To(Succeed(), "Failed to create internalServiceExport") } - Expect(hubClient.Create(ctx, internalServiceExport)).To(Succeed(), "Failed to create internalServiceExport") - } - }) - - It("Should fail the unjoin requests", func() { - for idx := range allMemberClusters { - memberCluster := allMemberClusters[idx] - mcObj := &clusterv1beta1.MemberCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: memberCluster.ClusterName, - }, + }) + + It("Should fail the unjoin requests", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + mcObj := &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: memberCluster.ClusterName, + }, + } + err := hubClient.Delete(ctx, mcObj) + Expect(err).ShouldNot(Succeed(), "Want the deletion to be denied") + var statusErr *apierrors.StatusError + Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Delete memberCluster call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&apierrors.StatusError{}))) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("Please delete serviceExport test-namespace/test-svc in the member cluster before leaving, request is denied")) } - err := hubClient.Delete(ctx, mcObj) - Expect(err).ShouldNot(Succeed(), "Want the deletion to be denied") - var statusErr *apierrors.StatusError - Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Delete memberCluster call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&apierrors.StatusError{}))) - Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("Please delete serviceExport test-namespace/test-svc in the member cluster before leaving, request is denied")) - } - }) + }) - It("Deleting the internalServiceExports", func() { - for idx := range allMemberClusterNames { - memberCluster := allMemberClusters[idx] - namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster.ClusterName) + It("Deleting the internalServiceExports", func() { + for idx := range allMemberClusterNames { + memberCluster := allMemberClusters[idx] + namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster.ClusterName) - internalSvcExportKey := types.NamespacedName{Namespace: namespaceName, Name: internalServiceExportName} - var export fleetnetworkingv1alpha1.InternalServiceExport - Expect(hubClient.Get(ctx, internalSvcExportKey, &export)).Should(Succeed(), "Failed to get internalServiceExport") - Expect(hubClient.Delete(ctx, &export)).To(Succeed(), "Failed to delete internalServiceExport") - } + internalSvcExportKey := types.NamespacedName{Namespace: namespaceName, Name: internalServiceExportName} + var export fleetnetworkingv1alpha1.InternalServiceExport + Expect(hubClient.Get(ctx, internalSvcExportKey, &export)).Should(Succeed(), "Failed to get internalServiceExport") + Expect(hubClient.Delete(ctx, &export)).To(Succeed(), "Failed to delete internalServiceExport") + } + }) + + It("Should be able to trigger the member cluster DELETE", func() { + setAllMemberClustersToLeave() + }) + + It("Removing the finalizer from the internalServiceExport", func() { + for idx := range allMemberClusterNames { + memberCluster := allMemberClusters[idx] + namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster.ClusterName) + + internalSvcExportKey := types.NamespacedName{Namespace: namespaceName, Name: internalServiceExportName} + var export fleetnetworkingv1alpha1.InternalServiceExport + Expect(hubClient.Get(ctx, internalSvcExportKey, &export)).Should(Succeed(), "Failed to get internalServiceExport") + export.Finalizers = nil + Expect(hubClient.Update(ctx, &export)).To(Succeed(), "Failed to update internalServiceExport") + } + }) + + It("Should be able to unjoin a cluster with crp still running", func() { + checkIfAllMemberClustersHaveLeft() + }) + + It("Should update CRP status to not placing any resources since all clusters are left", func() { + // resourceQuota is enveloped so it's not trackable yet + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantCRPSelectedResources, nil, nil, "0", false) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("Validating if the resources are still on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := checkAllResourcesPlacement(memberCluster) + Consistently(workResourcesPlacedActual, 3*consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("Should be able to rejoin the cluster", func() { + By("rejoin all the clusters without deleting the CRP") + setAllMemberClustersToJoin() + checkIfAllMemberClustersHaveJoined() + checkIfAzurePropertyProviderIsWorking() + }) + + It("should update CRP status to applied to all clusters 
again automatically after rejoining", func() { + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantCRPSelectedResources, allMemberClusterNames, nil, "0", true) + Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) }) - It("Should be able to trigger the member cluster DELETE", func() { - setAllMemberClustersToLeave() + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) }) + }) - It("Removing the finalizer from the internalServiceExport", func() { - for idx := range allMemberClusterNames { - memberCluster := allMemberClusters[idx] - namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, memberCluster.ClusterName) - - internalSvcExportKey := types.NamespacedName{Namespace: namespaceName, Name: internalServiceExportName} - var export fleetnetworkingv1alpha1.InternalServiceExport - Expect(hubClient.Get(ctx, internalSvcExportKey, &export)).Should(Succeed(), "Failed to get internalServiceExport") - export.Finalizers = nil - Expect(hubClient.Update(ctx, &export)).To(Succeed(), "Failed to update internalServiceExport") + Describe("Test member cluster join and leave flow for resource placement", Ordered, Serial, func() { + BeforeAll(func() { + resourceSelectors := []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "Namespace", + Version: "v1", + Name: workNamespaceName, + SelectionScope: placementv1beta1.NamespaceOnly, + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: "ClusterResourceEnvelope", + Version: placementv1beta1.GroupVersion.Version, + Name: testClusterResourceEnvelope.Name, + }, } - }) + createCRPWithApplyStrategy(crpName, nil, resourceSelectors) - It("Should be able to unjoin a cluster with crp still running", func() { - checkIfAllMemberClustersHaveLeft() - }) + wantCRPSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Kind: "Namespace", + Name: workNamespaceName, + Version: "v1", + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ClusterResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testClusterResourceEnvelope.Name, + }, + } - It("Should update CRP status to not placing any resources since all clusters are left", func() { - // resourceQuota is enveloped so it's not trackable yet - crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, nil, nil, "0", false) - Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") - }) + By("should update CRP status as expected") + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantCRPSelectedResources, allMemberClusterNames, nil, "0", true) + Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") - It("Validating if the resources are still on all member clusters", func() { - for idx := range allMemberClusters { - memberCluster := allMemberClusters[idx] - workResourcesPlacedActual := checkAllResourcesPlacement(memberCluster) - Consistently(workResourcesPlacedActual, 3*consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", 
memberCluster.ClusterName) + wantRPSelectedResources = []placementv1beta1.ResourceIdentifier{ + { + Kind: "ConfigMap", + Name: testConfigMap.Name, + Version: "v1", + Namespace: workNamespaceName, + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testResourceEnvelope.Name, + Namespace: workNamespaceName, + }, } }) - It("Should be able to rejoin the cluster", func() { - By("rejoin all the clusters without deleting the CRP") - setAllMemberClustersToJoin() - checkIfAllMemberClustersHaveJoined() - checkIfAzurePropertyProviderIsWorking() + Context("Test cluster join and leave flow with RP not deleted", Label("joinleave"), Ordered, Serial, func() { + It("Create the RP that select the config map and place it to all clusters", func() { + resourceSelectors := []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Name: testConfigMap.Name, + Version: "v1", + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: testResourceEnvelope.Name, + }, + } + createRPWithApplyStrategy(workNamespaceName, rpName, nil, resourceSelectors) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantRPSelectedResources, allMemberClusterNames, nil, "0", true) + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("should place the resources on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := checkAllResourcesPlacement(memberCluster) + Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + // Skip the serviceExport check + It("Should be able to trigger the member cluster DELETE", func() { + setAllMemberClustersToLeave() + }) + + It("Should be able to unjoin a cluster with rp still running", func() { + checkIfAllMemberClustersHaveLeft() + }) + + It("Should update CRP status to not placing any resources since all clusters are left", func() { + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantCRPSelectedResources, nil, nil, "0", false) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("Should update RP status to not placing any resources since all clusters are left", func() { + // resourceQuota is enveloped so it's not trackable yet + rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantRPSelectedResources, nil, nil, "0", false) + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) + + It("Validating if the resources are still on all member clusters", func() { + for idx := range allMemberClusters { + memberCluster := allMemberClusters[idx] + workResourcesPlacedActual := checkAllResourcesPlacement(memberCluster) + Consistently(workResourcesPlacedActual, 3*consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + } + }) + + It("Should be 
able to rejoin the cluster", func() { + By("rejoin all the clusters without deleting the RP") + setAllMemberClustersToJoin() + checkIfAllMemberClustersHaveJoined() + checkIfAzurePropertyProviderIsWorking() + }) + + It("should update CRP status to applied to all clusters again automatically after rejoining", func() { + crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantCRPSelectedResources, allMemberClusterNames, nil, "0", true) + Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + It("should update RP status to applied to all clusters again automatically after rejoining", func() { + rpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(rpKey, wantRPSelectedResources, allMemberClusterNames, nil, "0", true) + Eventually(rpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP status as expected") + }) }) - It("should update CRP status to applied to all clusters again automatically after rejoining", func() { - crpStatusUpdatedActual := customizedPlacementStatusUpdatedActual(types.NamespacedName{Name: crpName}, wantSelectedResources, allMemberClusterNames, nil, "0", true) - Eventually(crpStatusUpdatedActual, workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") - }) - }) + AfterAll(func() { + By(fmt.Sprintf("deleting resource placement %s and related resources", rpName)) + ensureRPAndRelatedResourcesDeleted(rpKey, allMemberClusters) - AfterAll(func() { - By(fmt.Sprintf("deleting placement %s and related resources", crpName)) - ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + By(fmt.Sprintf("deleting placement %s and related resources", crpName)) + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) }) }) diff --git a/test/e2e/resource_placement_apply_strategy_test.go b/test/e2e/resource_placement_apply_strategy_test.go index 51ac4614b..96403cf15 100644 --- a/test/e2e/resource_placement_apply_strategy_test.go +++ b/test/e2e/resource_placement_apply_strategy_test.go @@ -93,7 +93,7 @@ var _ = Describe("validating resource placement using different apply strategies // Create the RP. strategy := &placementv1beta1.ApplyStrategy{AllowCoOwnership: true} - createRPWithApplyStrategy(workNamespaceName, rpName, strategy) + createRPWithApplyStrategy(workNamespaceName, rpName, strategy, nil) }) AfterAll(func() { @@ -153,7 +153,7 @@ var _ = Describe("validating resource placement using different apply strategies // Create the RP. 
strategy := &placementv1beta1.ApplyStrategy{AllowCoOwnership: false} - createRPWithApplyStrategy(workNamespaceName, rpName, strategy) + createRPWithApplyStrategy(workNamespaceName, rpName, strategy, nil) }) AfterAll(func() { @@ -207,7 +207,7 @@ var _ = Describe("validating resource placement using different apply strategies Type: placementv1beta1.ApplyStrategyTypeServerSideApply, AllowCoOwnership: false, } - createRPWithApplyStrategy(workNamespaceName, rpName, strategy) + createRPWithApplyStrategy(workNamespaceName, rpName, strategy, nil) }) AfterAll(func() { @@ -261,7 +261,7 @@ var _ = Describe("validating resource placement using different apply strategies Type: placementv1beta1.ApplyStrategyTypeServerSideApply, AllowCoOwnership: false, } - createRPWithApplyStrategy(workNamespaceName, rpName, strategy) + createRPWithApplyStrategy(workNamespaceName, rpName, strategy, nil) }) AfterAll(func() { @@ -378,7 +378,7 @@ var _ = Describe("validating resource placement using different apply strategies ServerSideApplyConfig: &placementv1beta1.ServerSideApplyConfig{ForceConflicts: true}, AllowCoOwnership: true, } - createRPWithApplyStrategy(workNamespaceName, rpName, strategy) + createRPWithApplyStrategy(workNamespaceName, rpName, strategy, nil) }) AfterAll(func() { diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 7986e70bd..54154f273 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -1485,7 +1485,7 @@ func buildOwnerReference(cluster *framework.Cluster, crpName string) *metav1.Own } // createRPWithApplyStrategy creates a ResourcePlacement with the given name and apply strategy. -func createRPWithApplyStrategy(rpNamespace, rpName string, applyStrategy *placementv1beta1.ApplyStrategy) { +func createRPWithApplyStrategy(rpNamespace, rpName string, applyStrategy *placementv1beta1.ApplyStrategy, resourceSelectors []placementv1beta1.ResourceSelectorTerm) { rp := &placementv1beta1.ResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: rpName, @@ -1507,15 +1507,15 @@ func createRPWithApplyStrategy(rpNamespace, rpName string, applyStrategy *placem if applyStrategy != nil { rp.Spec.Strategy.ApplyStrategy = applyStrategy } + if resourceSelectors != nil { + rp.Spec.ResourceSelectors = resourceSelectors + } By(fmt.Sprintf("creating placement %s", rpName)) Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s", rpName) } // createCRPWithApplyStrategy creates a ClusterResourcePlacement with the given name and apply strategy. func createCRPWithApplyStrategy(crpName string, applyStrategy *placementv1beta1.ApplyStrategy, resourceSelectors []placementv1beta1.ResourceSelectorTerm) { - if resourceSelectors == nil { - resourceSelectors = workResourceSelector() - } crp := &placementv1beta1.ClusterResourcePlacement{ ObjectMeta: metav1.ObjectMeta{ Name: crpName, @@ -1524,7 +1524,7 @@ func createCRPWithApplyStrategy(crpName string, applyStrategy *placementv1beta1. Finalizers: []string{customDeletionBlockerFinalizer}, }, Spec: placementv1beta1.PlacementSpec{ - ResourceSelectors: resourceSelectors, + ResourceSelectors: workResourceSelector(), Strategy: placementv1beta1.RolloutStrategy{ Type: placementv1beta1.RollingUpdateRolloutStrategyType, RollingUpdate: &placementv1beta1.RollingUpdateConfig{ @@ -1536,13 +1536,16 @@ func createCRPWithApplyStrategy(crpName string, applyStrategy *placementv1beta1. 
if applyStrategy != nil { crp.Spec.Strategy.ApplyStrategy = applyStrategy } + if resourceSelectors != nil { + crp.Spec.ResourceSelectors = resourceSelectors + } By(fmt.Sprintf("creating placement %s", crpName)) Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName) } // createRP creates a ResourcePlacement with the given name. func createRP(rpNamespace, rpName string) { - createRPWithApplyStrategy(rpNamespace, rpName, nil) + createRPWithApplyStrategy(rpNamespace, rpName, nil, nil) } // createCRP creates a ClusterResourcePlacement with the given name. From bd289f111cae430bfc93d20f46f6a97de4ea5c34 Mon Sep 17 00:00:00 2001 From: Wantong Date: Sun, 31 Aug 2025 23:00:00 -0700 Subject: [PATCH 9/9] test: add e2e tests for RP for resource selection (#236) Signed-off-by: Wantong Jiang Signed-off-by: michaelawyu --- docker/refresh-token.Dockerfile | 2 +- pkg/controllers/placement/controller.go | 2 +- test/e2e/actuals_test.go | 23 + .../e2e/placement_selecting_resources_test.go | 5 +- ...urce_placement_selecting_resources_test.go | 1108 +++++++++++++++++ 5 files changed, 1135 insertions(+), 5 deletions(-) create mode 100644 test/e2e/resource_placement_selecting_resources_test.go diff --git a/docker/refresh-token.Dockerfile b/docker/refresh-token.Dockerfile index 6bea5ceb1..5796d058c 100644 --- a/docker/refresh-token.Dockerfile +++ b/docker/refresh-token.Dockerfile @@ -1,4 +1,4 @@ -# Build the hubagent binary +# Build the refreshtoken binary FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.6 AS builder WORKDIR /workspace diff --git a/pkg/controllers/placement/controller.go b/pkg/controllers/placement/controller.go index 7f31fdb7c..bb65a7304 100644 --- a/pkg/controllers/placement/controller.go +++ b/pkg/controllers/placement/controller.go @@ -153,7 +153,7 @@ func (r *Reconciler) handleUpdate(ctx context.Context, placementObj fleetv1beta1 // TODO, create a separate user type error struct to improve the user facing messages scheduleCondition := metav1.Condition{ Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Type: getPlacementScheduledConditionType(placementObj), Reason: condition.InvalidResourceSelectorsReason, Message: fmt.Sprintf("The resource selectors are invalid: %v", err), ObservedGeneration: placementObj.GetGeneration(), diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index 506916bc9..934ca099f 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -1750,6 +1750,29 @@ func validateCRPSnapshotRevisions(crpName string, wantPolicySnapshotRevision, wa return nil } +func validateRPSnapshotRevisions(rpName, namespace string, wantPolicySnapshotRevision, wantResourceSnapshotRevision int) error { + listOptions := []client.ListOption{ + client.InNamespace(namespace), + client.MatchingLabels{placementv1beta1.PlacementTrackingLabel: rpName}, + } + + snapshotList := &placementv1beta1.SchedulingPolicySnapshotList{} + if err := hubClient.List(ctx, snapshotList, listOptions...); err != nil { + return err + } + if len(snapshotList.Items) != wantPolicySnapshotRevision { + return fmt.Errorf("schedulingPolicySnapshotList got %v, want 1", len(snapshotList.Items)) + } + resourceSnapshotList := &placementv1beta1.ResourceSnapshotList{} + if err := hubClient.List(ctx, resourceSnapshotList, listOptions...); err != nil { + return err + } + if len(resourceSnapshotList.Items) != wantResourceSnapshotRevision { + return fmt.Errorf("resourceSnapshotList got %v, want 2", 
len(resourceSnapshotList.Items)) + } + return nil +} + func updateRunClusterRolloutSucceedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { diff --git a/test/e2e/placement_selecting_resources_test.go b/test/e2e/placement_selecting_resources_test.go index 35ff3cee3..601173394 100644 --- a/test/e2e/placement_selecting_resources_test.go +++ b/test/e2e/placement_selecting_resources_test.go @@ -812,7 +812,7 @@ var _ = Describe("validating CRP when failed to apply resources", Ordered, func( propagage := metav1.DeletePropagationForeground Expect(allMemberClusters[0].KubeClient.Delete(ctx, &existingNS, &client.DeleteOptions{PropagationPolicy: &propagage})).Should(Succeed(), "Failed to delete namespace %s", existingNS.Name) - By(fmt.Sprintf("garbage all things related to placement %s", crpName)) + By(fmt.Sprintf("garbage collect all things related to placement %s", crpName)) ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) }) @@ -1217,6 +1217,7 @@ var _ = Describe("validating CRP when selected resources cross the 1MB limit", O crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) BeforeAll(func() { By("creating resources for multiple resource snapshots") + createWorkResources() createResourcesForMultipleResourceSnapshots() // Create the CRP. @@ -1414,8 +1415,6 @@ var _ = Describe("creating CRP and checking selected resources order", Ordered, }) func createResourcesForMultipleResourceSnapshots() { - createWorkResources() - for i := 0; i < 3; i++ { var secret corev1.Secret Expect(utils.GetObjectFromManifest("../integration/manifests/resources/test-large-secret.yaml", &secret)).Should(Succeed(), "Failed to read large secret from file") diff --git a/test/e2e/resource_placement_selecting_resources_test.go b/test/e2e/resource_placement_selecting_resources_test.go new file mode 100644 index 000000000..069dd3269 --- /dev/null +++ b/test/e2e/resource_placement_selecting_resources_test.go @@ -0,0 +1,1108 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "strconv" + + "github.com/google/go-cmp/cmp" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiResource "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/controllers/workapplier" + "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" + "github.com/kubefleet-dev/kubefleet/test/e2e/framework" +) + +var _ = Describe("testing RP selecting resources", Label("resourceplacement"), func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess()) + + BeforeEach(OncePerOrdered, func() { + By("creating work resources") + createNamespace() + + // Create the CRP with Namespace-only selector. + createNamespaceOnlyCRP(crpName) + + By("should update CRP status as expected") + crpStatusUpdatedActual := crpStatusUpdatedActual(workNamespaceIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterEach(OncePerOrdered, func() { + ensureCRPAndRelatedResourcesDeleted(crpName, allMemberClusters) + }) + + Context("selecting resources by name", Ordered, func() { + BeforeAll(func() { + createConfigMap() + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + }, + } + By(fmt.Sprintf("creating placement %s/%s", rp.Namespace, rpName)) + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s/%s", rp.Namespace, rpName) + }) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("can delete the RP", func() { + // Delete the RP. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s/%s", rp.Namespace, rpName) + }) + + It("should remove placed resources from all member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s/%s", appNamespace().Name, rpName) + }) + }) + + Context("selecting resources by label", Ordered, func() { + BeforeAll(func() { + // Create the configMap with a label so that it can be selected by the RP. + configMap := appConfigMap() + if configMap.Labels == nil { + configMap.Labels = make(map[string]string) + } + configMap.Labels["app"] = strconv.Itoa(GinkgoParallelProcess()) + Expect(hubClient.Create(ctx, &configMap)).Should(Succeed(), "Failed to create config map %s/%s", configMap.Namespace, configMap.Name) + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": strconv.Itoa(GinkgoParallelProcess()), + }, + }, + }, + }, + }, + } + By(fmt.Sprintf("creating placement %s/%s", rp.Namespace, rpName)) + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s/%s", rp.Namespace, rpName) + }) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("can delete the RP", func() { + // Delete the RP. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s/%s", rp.Namespace, rpName) + }) + + It("should remove placed resources from all member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s/%s", appNamespace().Name, rpName) + }) + }) + + Context("validating RP when namespaced resources become selected after the updates", Ordered, func() { + BeforeAll(func() { + createConfigMap() + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": fmt.Sprintf("test-%d", GinkgoParallelProcess()), + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(5), + }, + }, + }, + } + By(fmt.Sprintf("creating placement %s/%s", rp.Namespace, rpName)) + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s/%s", rp.Namespace, rpName) + }) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{}, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should not place work resources on member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + + It("updating the resources on the hub and the configMap becomes selected", func() { + cm := appConfigMap() + configMap := &corev1.ConfigMap{} + Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: cm.Namespace, Name: cm.Name}, configMap)).Should(Succeed(), "Failed to get the config map %s/%s", cm.Namespace, cm.Name) + + if configMap.Labels == nil { + configMap.Labels = make(map[string]string) + } + configMap.Labels["app"] = fmt.Sprintf("test-%d", GinkgoParallelProcess()) + Expect(hubClient.Update(ctx, configMap)).Should(Succeed(), "Failed to update config map %s/%s", cm.Namespace, cm.Name) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("can delete the RP", func() { + // 
Delete the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s/%s", rp.Namespace, rpName) + }) + + It("should remove placed resources from all member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s/%s", appNamespace().Name, rpName) + }) + }) + + Context("validating RP when namespaced resources become unselected after the updates", Ordered, func() { + BeforeAll(func() { + // Create a configMap with a label so it can be selected. + configMap := appConfigMap() + if configMap.Labels == nil { + configMap.Labels = make(map[string]string) + } + configMap.Labels["app"] = strconv.Itoa(GinkgoParallelProcess()) + Expect(hubClient.Create(ctx, &configMap)).Should(Succeed(), "Failed to update config map %s/%s", configMap.Namespace, configMap.Name) + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": strconv.Itoa(GinkgoParallelProcess()), + }, + }, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(5), + }, + }, + }, + } + By(fmt.Sprintf("creating placement %s/%s", rp.Namespace, rpName)) + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s/%s", rp.Namespace, rpName) + }) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("updating the resources on the hub and the configMap becomes unselected", func() { + cm := appConfigMap() + configMap := &corev1.ConfigMap{} + Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: cm.Namespace, Name: cm.Name}, configMap)).Should(Succeed(), "Failed to get the config map %s/%s", cm.Namespace, cm.Name) + + if configMap.Labels == nil { + configMap.Labels = make(map[string]string) + } + configMap.Labels["app"] = fmt.Sprintf("test-%d", GinkgoParallelProcess()) + Expect(hubClient.Update(ctx, configMap)).Should(Succeed(), "Failed to update config map %s/%s", cm.Namespace, cm.Name) + }) + + It("should remove the selected resources on member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + + 
It("should update RP status as expected", func() { + // If there are no resources selected, the available condition reason will become "AllWorkAreAvailable". + rpStatusUpdatedActual := rpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{}, allMemberClusterNames, nil, "1") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("can delete the RP", func() { + // Delete the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s/%s", rp.Namespace, rpName) + }) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s/%s", appNamespace().Name, rpName) + }) + }) + + Context("validating RP when selecting a reserved resource", Ordered, func() { + BeforeAll(func() { + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + Name: "kube-root-ca.crt", + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(5), + }, + }, + }, + } + By(fmt.Sprintf("creating placement %s/%s", rp.Namespace, rpName)) + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s/%s", rp.Namespace, rpName) + }) + + AfterAll(func() { + By(fmt.Sprintf("deleting placement %s/%s", rpName, appNamespace().Name)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) + }) + + It("should update RP status as expected", func() { + // The configMap should not be selected. + rpStatusUpdatedActual := rpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{}, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("can delete the RP", func() { + // Delete the RP. 
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s/%s", rp.Namespace, rpName) + }) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s/%s", appNamespace().Name, rpName) + }) + }) + + Context("When creating a pickN RP with duplicated resources", Ordered, func() { + BeforeAll(func() { + By("creating configMap resource") + createConfigMap() + + By("Create a rp that selects the same resource twice") + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: "ConfigMap", + Name: appConfigMap().Name, + }, + { + Group: corev1.GroupName, + Version: "v1", + Kind: "ConfigMap", + Name: appConfigMap().Name, + }, + }, + }, + } + By(fmt.Sprintf("creating placement %s/%s", rp.Namespace, rpName)) + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s/%s", rp.Namespace, rpName) + }) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) + }) + + It("should update RP status as expected", func() { + Eventually(func() error { + gotRP := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, gotRP); err != nil { + return err + } + wantStatus := placementv1beta1.PlacementStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionFalse, + Type: string(placementv1beta1.ResourcePlacementScheduledConditionType), + Reason: condition.InvalidResourceSelectorsReason, + ObservedGeneration: 1, + }, + }, + } + if diff := cmp.Diff(gotRP.Status, wantStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("RP status diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("updating the RP to select one configmap", func() { + gotRP := &placementv1beta1.ResourcePlacement{} + Expect(hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, gotRP)).Should(Succeed(), "Failed to get RP %s/%s", appNamespace().Name, rpName) + // Just keep one configMap selector. 
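+			// Dropping the duplicate entry leaves a single valid selector, which should clear
+			// the InvalidResourceSelectors scheduled condition and let scheduling and
+			// placement proceed, as verified by the steps below.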
+ gotRP.Spec.ResourceSelectors = gotRP.Spec.ResourceSelectors[:1] + Expect(hubClient.Update(ctx, gotRP)).To(Succeed(), "Failed to update RP %s/%s", appNamespace().Name, rpName) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + }) + + Context("validating RP when failed to apply resources", Ordered, func() { + var existingConfigMap corev1.ConfigMap + BeforeAll(func() { + By("creating configMap resource on hub cluster") + createConfigMap() + + existingConfigMap = appConfigMap() + existingConfigMap.SetOwnerReferences([]metav1.OwnerReference{ + { + APIVersion: "another-api-version", + Kind: "another-kind", + Name: "another-owner", + UID: "another-uid", + }, + }) + By(fmt.Sprintf("creating configMap %s on member cluster", existingConfigMap.Name)) + Expect(allMemberClusters[0].KubeClient.Create(ctx, &existingConfigMap)).Should(Succeed(), "Failed to create configMap %s", existingConfigMap.Name) + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: configMapSelector(), + }, + } + By(fmt.Sprintf("creating placement %s/%s", rp.Namespace, rpName)) + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s/%s", rp.Namespace, rpName) + }) + + AfterAll(func() { + By(fmt.Sprintf("garbage collect the %s", existingConfigMap.Name)) + propagage := metav1.DeletePropagationForeground + Expect(allMemberClusters[0].KubeClient.Delete(ctx, &existingConfigMap, &client.DeleteOptions{PropagationPolicy: &propagage})).Should(Succeed(), "Failed to delete configMap %s", existingConfigMap.Name) + + By(fmt.Sprintf("garbage collect all things related to placement %s/%s", rpName, appNamespace().Name)) + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, allMemberClusters) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}, rp); err != nil { + return err + } + + appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + wantStatus := placementv1beta1.PlacementStatus{ + Conditions: rpAppliedFailedConditions(rp.Generation), + PerClusterPlacementStatuses: []placementv1beta1.PerClusterPlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + FailedPlacements: []placementv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Kind: "ConfigMap", + Name: appConfigMapName, + Version: "v1", + Namespace: appNamespace().Name, + }, + Condition: metav1.Condition{ + Type: placementv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(workapplier.ApplyOrReportDiffResTypeFailedToTakeOver), + ObservedGeneration: 0, + }, + }, + }, + Conditions: 
perClusterApplyFailedConditions(rp.Generation), + }, + { + ClusterName: memberCluster2EastCanaryName, + ObservedResourceIndex: "0", + Conditions: perClusterRolloutCompletedConditions(rp.Generation, true, false), + }, + { + ClusterName: memberCluster3WestProdName, + ObservedResourceIndex: "0", + Conditions: perClusterRolloutCompletedConditions(rp.Generation, true, false), + }, + }, + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Kind: "ConfigMap", + Name: appConfigMapName, + Version: "v1", + Namespace: appNamespace().Name, + }, + }, + ObservedResourceIndex: "0", + } + if diff := cmp.Diff(rp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("RP status diff (-got, +want): %s", diff) + } + return nil + } + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("can delete the RP", func() { + // Delete the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: appNamespace().Name, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s/%s", rp.Namespace, rpName) + }) + + It("should remove placed resources from member clusters excluding the first one", func() { + checkIfRemovedConfigMapFromMemberClusters(allMemberClusters[1:]) + }) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: appNamespace().Name}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s/%s", appNamespace().Name, rpName) + }) + + It("configMap should be kept on member cluster", func() { + Consistently(func() error { + appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + cm := &corev1.ConfigMap{} + return allMemberClusters[0].KubeClient.Get(ctx, types.NamespacedName{Name: appConfigMapName, Namespace: appNamespace().Name}, cm) + }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "ConfigMap which is not owned by the RP should not be deleted") + }) + }) + + Context("validating RP revision history allowing single revision when updating resource selector", Ordered, func() { + workNamespace := appNamespace().Name + BeforeAll(func() { + By("creating configMap resource") + createConfigMap() + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + Name: "invalid-configmap", + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(5), + }, + }, + RevisionHistoryLimit: ptr.To(int32(1)), + }, + } + By(fmt.Sprintf("creating placement %s/%s", rp.Namespace, rpName)) + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s/%s", rp.Namespace, rpName) + }) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(nil, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("adding resource selectors", func() { + updateFunc := func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespace}, rp); err != nil { + return err + } + + rp.Spec.ResourceSelectors = append(rp.Spec.ResourceSelectors, placementv1beta1.ResourceSelectorTerm{ + Group: "", + Kind: "ConfigMap", + Version: "v1", + Name: fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()), + }) + // may hit 409 + return hubClient.Update(ctx, rp) + } + Eventually(updateFunc, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the rp %s/%s", workNamespace, rpName) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have one policy snapshot revision and one resource snapshot revision", func() { + Expect(validateRPSnapshotRevisions(rpName, workNamespace, 1, 1)).Should(Succeed(), "Failed to validate the revision history") + }) + + It("can delete the RP", func() { + // Delete the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s/%s", workNamespace, rpName) + }) + + It("should remove placed resources from all member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: workNamespace}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s/%s", workNamespace, rpName) + }) + }) + + Context("validating RP revision history allowing multiple revisions when updating resource selector", Ordered, func() { + workNamespace := appNamespace().Name + BeforeAll(func() { + By("creating configMap resource") + createConfigMap() + + // Create the RP. 
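+			// Unlike the previous context, this RP does not set RevisionHistoryLimit, so more
+			// than one resource snapshot revision is expected to be retained after the
+			// selector update (see the revision check below).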
+ rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + Name: "invalid-configmap", + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(5), + }, + }, + }, + } + By(fmt.Sprintf("creating placement %s/%s", rp.Namespace, rpName)) + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s/%s", rp.Namespace, rpName) + }) + + AfterAll(func() { + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(nil, allMemberClusterNames, nil, "0") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("adding resource selectors", func() { + updateFunc := func() error { + rp := &placementv1beta1.ResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: workNamespace}, rp); err != nil { + return err + } + + rp.Spec.ResourceSelectors = append(rp.Spec.ResourceSelectors, placementv1beta1.ResourceSelectorTerm{ + Group: "", + Kind: "ConfigMap", + Version: "v1", + Name: fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()), + }) + // may hit 409 + return hubClient.Update(ctx, rp) + } + Eventually(updateFunc, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the rp %s/%s", workNamespace, rpName) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(appConfigMapIdentifiers(), allMemberClusterNames, nil, "1") + Eventually(rpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should have one policy snapshot revision and two resource snapshot revisions", func() { + Expect(validateRPSnapshotRevisions(rpName, workNamespace, 1, 2)).Should(Succeed(), "Failed to validate the revision history") + }) + + It("can delete the RP", func() { + // Delete the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + }, + } + Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s/%s", workNamespace, rpName) + }) + + It("should remove placed resources from all member clusters", checkIfRemovedConfigMapFromAllMemberClusters) + + It("should remove controller finalizers from RP", func() { + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: workNamespace}) + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s/%s", workNamespace, rpName) + }) + }) + + // running spec in parallel with other specs causes timeouts. 
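+	// The config map plus the large secrets created below push the selected resources past
+	// the 1MB limit, so the selected resources are expected to be split across multiple
+	// resource snapshots (see the snapshot count check in this context).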
+ Context("validating RP when selected resources cross the 1MB limit", Ordered, Serial, func() { + workNamespace := appNamespace().Name + BeforeAll(func() { + By("creating resources for multiple resource snapshots") + createConfigMap() + createResourcesForMultipleResourceSnapshots() + + // Create the RP. + rp := &placementv1beta1.ResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: rpName, + Namespace: workNamespace, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, + }, + ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{ + { + Group: "", + Kind: "ConfigMap", + Version: "v1", + Name: appConfigMap().Name, + }, + // Select all the secrets. + { + Group: "", + Kind: "Secret", + Version: "v1", + }, + }, + }, + } + By(fmt.Sprintf("creating placement %s/%s", rp.Namespace, rpName)) + Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s/%s", rp.Namespace, rpName) + }) + + AfterAll(func() { + secrets := make([]client.Object, 3) + for i := 2; i >= 0; i-- { + secrets[i] = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(appSecretNameTemplate, i), + Namespace: workNamespace, + }, + } + } + ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: workNamespace}, allMemberClusters, secrets...) + }) + + It("check if created resource snapshots are as expected", func() { + Eventually(multipleRPResourceSnapshotsCreatedActual("2", "2", "0"), largeEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to check created resource snapshots", rpName) + }) + + It("should update RP status as expected", func() { + rpStatusUpdatedActual := rpStatusUpdatedActual(resourceIdentifiersForMultipleResourcesSnapshotsRP(), []string{memberCluster1EastProdName, memberCluster2EastCanaryName}, nil, "0") + Eventually(rpStatusUpdatedActual, largeEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s status as expected", rpName) + }) + + It("should place the selected resources on member clusters", func() { + targetMemberClusters := []*framework.Cluster{memberCluster1EastProd, memberCluster2EastCanary} + checkIfPlacedWorkResourcesOnTargetMemberClusters(targetMemberClusters) + checkIfPlacedLargeSecretResourcesOnTargetMemberClusters(targetMemberClusters) + }) + + It("can delete the RP", func() { + // Delete the RP. 
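+			// Only the config map removal is verified right after the deletion; the large
+			// secrets are cleaned up in AfterAll via ensureRPAndRelatedResourcesDeleted.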
+			rp := &placementv1beta1.ResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      rpName,
+					Namespace: workNamespace,
+				},
+			}
+			Expect(hubClient.Delete(ctx, rp)).To(Succeed(), "Failed to delete RP %s/%s", workNamespace, rpName)
+		})
+
+		It("should remove placed resources from all member clusters", checkIfRemovedConfigMapFromAllMemberClusters)
+
+		It("should remove controller finalizers from RP", func() {
+			finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromPlacementActual(types.NamespacedName{Name: rpName, Namespace: workNamespace})
+			Eventually(finalizerRemovedActual, largeEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from RP %s/%s", workNamespace, rpName)
+		})
+	})
+
+	Context("creating RP and checking selected resources order", Ordered, func() {
+		nsName := appNamespace().Name
+		var configMap *corev1.ConfigMap
+		var secret *corev1.Secret
+		var pvc *corev1.PersistentVolumeClaim
+		var role *rbacv1.Role
+
+		BeforeAll(func() {
+			// Create the ConfigMap.
+			configMap = &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      fmt.Sprintf("test-configmap-%d", GinkgoParallelProcess()),
+					Namespace: nsName,
+				},
+				Data: map[string]string{
+					"key1": "value1",
+				},
+			}
+			Expect(hubClient.Create(ctx, configMap)).To(Succeed(), "Failed to create ConfigMap")
+
+			// Create the Secret.
+			secret = &corev1.Secret{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      fmt.Sprintf("test-secret-%d", GinkgoParallelProcess()),
+					Namespace: nsName,
+				},
+				StringData: map[string]string{
+					"username": "test-user",
+					"password": "test-password",
+				},
+				Type: corev1.SecretTypeOpaque,
+			}
+			Expect(hubClient.Create(ctx, secret)).To(Succeed(), "Failed to create Secret")
+
+			// Create the PersistentVolumeClaim.
+			pvc = &corev1.PersistentVolumeClaim{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      fmt.Sprintf("test-pvc-%d", GinkgoParallelProcess()),
+					Namespace: nsName,
+				},
+				Spec: corev1.PersistentVolumeClaimSpec{
+					AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
+					StorageClassName: ptr.To("standard"),
+					Resources: corev1.VolumeResourceRequirements{
+						Requests: corev1.ResourceList{
+							corev1.ResourceStorage: apiResource.MustParse("1Gi"),
+						},
+					},
+				},
+			}
+			Expect(hubClient.Create(ctx, pvc)).To(Succeed(), "Failed to create PVC")
+
+			// Create the Role.
+			role = &rbacv1.Role{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      fmt.Sprintf("test-role-%d", GinkgoParallelProcess()),
+					Namespace: nsName,
+				},
+				Rules: []rbacv1.PolicyRule{
+					{
+						APIGroups: []string{""},
+						Resources: []string{"configmaps"},
+						Verbs:     []string{"get", "list"},
+					},
+				},
+			}
+			Expect(hubClient.Create(ctx, role)).To(Succeed(), "Failed to create Role")
+
+			// Create the RP.
+			rp := &placementv1beta1.ResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       rpName,
+					Namespace:  nsName,
+					Finalizers: []string{customDeletionBlockerFinalizer},
+				},
+				Spec: placementv1beta1.PlacementSpec{
+					ResourceSelectors: []placementv1beta1.ResourceSelectorTerm{
+						{
+							Group:   "",
+							Kind:    "ConfigMap",
+							Version: "v1",
+						},
+						{
+							Group:   "",
+							Kind:    "Secret",
+							Version: "v1",
+						},
+						{
+							Group:   "",
+							Kind:    "PersistentVolumeClaim",
+							Version: "v1",
+						},
+						{
+							Group:   "rbac.authorization.k8s.io",
+							Kind:    "Role",
+							Version: "v1",
+						},
+					},
+				},
+			}
+			By(fmt.Sprintf("creating placement %s/%s", rp.Namespace, rpName))
+			Expect(hubClient.Create(ctx, rp)).To(Succeed(), "Failed to create RP %s/%s", rp.Namespace, rpName)
+		})
+
+		AfterAll(func() {
+			By(fmt.Sprintf("garbage collecting all resources related to placement %s/%s", rpName, nsName))
+			ensureRPAndRelatedResourcesDeleted(types.NamespacedName{Name: rpName, Namespace: nsName}, allMemberClusters, configMap, secret, pvc, role)
+		})
+
+		It("should update RP status with the correct order of the selected resources", func() {
+			// Define the expected resources in order.
+			expectedResources := []placementv1beta1.ResourceIdentifier{
+				{Kind: "Secret", Name: secret.Name, Namespace: nsName, Version: "v1"},
+				{Kind: "ConfigMap", Name: configMap.Name, Namespace: nsName, Version: "v1"},
+				{Kind: "PersistentVolumeClaim", Name: pvc.Name, Namespace: nsName, Version: "v1"},
+				{Group: "rbac.authorization.k8s.io", Kind: "Role", Name: role.Name, Namespace: nsName, Version: "v1"},
+			}
+
+			Eventually(func() error {
+				rp := &placementv1beta1.ResourcePlacement{}
+				if err := hubClient.Get(ctx, types.NamespacedName{Name: rpName, Namespace: nsName}, rp); err != nil {
+					return err
+				}
+				if diff := cmp.Diff(rp.Status.SelectedResources, expectedResources); diff != "" {
+					return fmt.Errorf("RP status diff (-got, +want): %s", diff)
+				}
+				return nil
+			}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update RP %s selected resource status as expected", rpName)
+		})
+	})
+})
+
+func multipleRPResourceSnapshotsCreatedActual(wantTotalNumberOfResourceSnapshots, wantNumberOfMasterIndexedResourceSnapshots, wantResourceIndex string) func() error {
+	rpName := fmt.Sprintf(rpNameTemplate, GinkgoParallelProcess())
+	workNamespace := appNamespace().Name
+
+	return func() error {
+		var resourceSnapshotList placementv1beta1.ResourceSnapshotList
+		masterResourceSnapshotLabels := client.MatchingLabels{
+			placementv1beta1.IsLatestSnapshotLabel:  strconv.FormatBool(true),
+			placementv1beta1.PlacementTrackingLabel: rpName,
+		}
+		if err := hubClient.List(ctx, &resourceSnapshotList, masterResourceSnapshotLabels, client.InNamespace(workNamespace)); err != nil {
+			return err
+		}
+		// There should be only one master resource snapshot.
+		if len(resourceSnapshotList.Items) != 1 {
+			return fmt.Errorf("number of master resource snapshots has unexpected value: got %d, want %d", len(resourceSnapshotList.Items), 1)
+		}
+		masterResourceSnapshot := resourceSnapshotList.Items[0]
+		// Labels to list all existing resource snapshots.
+		resourceSnapshotListLabels := client.MatchingLabels{placementv1beta1.PlacementTrackingLabel: rpName}
+		if err := hubClient.List(ctx, &resourceSnapshotList, resourceSnapshotListLabels, client.InNamespace(workNamespace)); err != nil {
+			return err
+		}
+		// Ensure that the total number of resource snapshots equals the wanted number of resource snapshots.
+		if strconv.Itoa(len(resourceSnapshotList.Items)) != wantTotalNumberOfResourceSnapshots {
+			return fmt.Errorf("total number of resource snapshots has unexpected value: got %s, want %s", strconv.Itoa(len(resourceSnapshotList.Items)), wantTotalNumberOfResourceSnapshots)
+		}
+		numberOfResourceSnapshots := masterResourceSnapshot.Annotations[placementv1beta1.NumberOfResourceSnapshotsAnnotation]
+		if numberOfResourceSnapshots != wantNumberOfMasterIndexedResourceSnapshots {
+			return fmt.Errorf("NumberOfResourceSnapshotsAnnotation in master resource snapshot has unexpected value: got %s, want %s", numberOfResourceSnapshots, wantNumberOfMasterIndexedResourceSnapshots)
+		}
+		masterResourceIndex := masterResourceSnapshot.Labels[placementv1beta1.ResourceIndexLabel]
+		if masterResourceIndex != wantResourceIndex {
+			return fmt.Errorf("resource index for master resource snapshot %s has unexpected value: got %s, want %s", masterResourceSnapshot.Name, masterResourceIndex, wantResourceIndex)
+		}
+		// Labels to list all resource snapshots with the master resource index.
+		resourceSnapshotListLabels = client.MatchingLabels{
+			placementv1beta1.ResourceIndexLabel:     masterResourceIndex,
+			placementv1beta1.PlacementTrackingLabel: rpName,
+		}
+		if err := hubClient.List(ctx, &resourceSnapshotList, resourceSnapshotListLabels, client.InNamespace(workNamespace)); err != nil {
+			return err
+		}
+		if strconv.Itoa(len(resourceSnapshotList.Items)) != wantNumberOfMasterIndexedResourceSnapshots {
+			return fmt.Errorf("number of resource snapshots with master resource index has unexpected value: got %s, want %s", strconv.Itoa(len(resourceSnapshotList.Items)), wantNumberOfMasterIndexedResourceSnapshots)
+		}
+		return nil
+	}
+}
+
+func resourceIdentifiersForMultipleResourcesSnapshotsRP() []placementv1beta1.ResourceIdentifier {
+	workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess())
+	var placementResourceIdentifiers []placementv1beta1.ResourceIdentifier
+
+	for i := 2; i >= 0; i-- {
+		placementResourceIdentifiers = append(placementResourceIdentifiers, placementv1beta1.ResourceIdentifier{
+			Kind:      "Secret",
+			Name:      fmt.Sprintf(appSecretNameTemplate, i),
+			Namespace: workNamespaceName,
+			Version:   "v1",
+		})
+	}
+
+	placementResourceIdentifiers = append(placementResourceIdentifiers, placementv1beta1.ResourceIdentifier{
+		Kind:      "ConfigMap",
+		Name:      fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()),
+		Namespace: workNamespaceName,
+		Version:   "v1",
+	})
+
+	return placementResourceIdentifiers
+}