From 9746243c6979f5380d3c42d9aea9502d15a89f62 Mon Sep 17 00:00:00 2001
From: Vince Prignano
Date: Fri, 28 May 2021 09:03:33 -0700
Subject: [PATCH] :warning: Move envtest setup under internal/envtest

This change refactors all envtest suites to use the internal/envtest
package instead of test/helpers. These helpers aren't useful to folks
outside of Cluster API because of the way we set up the CRDs.

Signed-off-by: Vince Prignano
---
 .../{webhook_suite_test.go => suite_test.go} | 16 +-
 api/v1alpha3/webhook_test.go | 18 +-
 .../{webhook_suite_test.go => suite_test.go} | 16 +-
 .../kubeadm/api/v1alpha3/webhook_test.go | 12 +-
 ...ubeadmconfig_controller_reconciler_test.go | 8 +-
 bootstrap/kubeadm/controllers/suite_test.go | 16 +-
 bootstrap/util/suite_test.go | 16 +-
 controllers/cluster_controller_test.go | 64 ++---
 .../machine_controller_node_labels_test.go | 20 +-
 controllers/machine_controller_test.go | 94 +++----
 .../machinedeployment_controller_test.go | 56 ++--
 .../machinehealthcheck_controller_test.go | 252 +++++++++---------
 controllers/machineset_controller_test.go | 34 +--
 .../remote/cluster_cache_healthcheck_test.go | 12 +-
 .../remote/cluster_cache_reconciler_test.go | 6 +-
 .../remote/cluster_cache_tracker_test.go | 6 +-
 controllers/remote/suite_test.go | 16 +-
 controllers/schema_test.go | 12 +-
 controllers/suite_test.go | 52 ++--
 controllers/suite_util_test.go | 24 +-
 .../{webhook_suite_test.go => suite_test.go} | 16 +-
 .../kubeadm/api/v1alpha3/webhook_test.go | 6 +-
 .../kubeadm/controllers/controller_test.go | 28 +-
 .../kubeadm/controllers/remediation_test.go | 104 ++++----
 .../kubeadm/controllers/suite_test.go | 16 +-
 controlplane/kubeadm/internal/cluster_test.go | 14 +-
 controlplane/kubeadm/internal/suite_test.go | 16 +-
 .../internal/workload_cluster_coredns_test.go | 12 +-
 .../internal/workload_cluster_etcd_test.go | 8 +-
 .../{webhook_suite_test.go => suite_test.go} | 16 +-
 exp/addons/api/v1alpha3/webhook_test.go | 12 +-
 .../clusterresourceset_controller_test.go | 82 +++---
 exp/addons/controllers/suite_test.go | 26 +-
 exp/api/v1alpha3/suite_test.go | 57 ++++
 exp/api/v1alpha3/webhook_suite_test.go | 57 ----
 exp/api/v1alpha3/webhook_test.go | 6 +-
 .../machinepool_controller_phases_test.go | 18 +-
 exp/controllers/suite_test.go | 22 +-
 internal/envtest/doc.go | 18 ++
 .../envtest/environment.go | 29 +-
 util/collections/suite_test.go | 16 +-
 util/patch/patch_test.go | 184 ++++++-------
 util/patch/suite_test.go | 16 +-
 43 files changed, 773 insertions(+), 756 deletions(-)
 rename api/v1alpha3/{webhook_suite_test.go => suite_test.go} (79%)
 rename bootstrap/kubeadm/api/v1alpha3/{webhook_suite_test.go => suite_test.go} (79%)
 rename controlplane/kubeadm/api/v1alpha3/{webhook_suite_test.go => suite_test.go} (79%)
 rename exp/addons/api/v1alpha3/{webhook_suite_test.go => suite_test.go} (79%)
 create mode 100644 exp/api/v1alpha3/suite_test.go
 delete mode 100644 exp/api/v1alpha3/webhook_suite_test.go
 create mode 100644 internal/envtest/doc.go
 rename test/helpers/envtest.go => internal/envtest/environment.go (93%)
diff --git a/api/v1alpha3/webhook_suite_test.go b/api/v1alpha3/suite_test.go similarity index 79% rename from api/v1alpha3/webhook_suite_test.go rename to api/v1alpha3/suite_test.go index e94f87abc796..2f899e906291 100644 --- a/api/v1alpha3/webhook_suite_test.go +++ b/api/v1alpha3/suite_test.go @@ -23,32 +23,32 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/cluster-api/test/helpers" +
"sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" // +kubebuilder:scaffold:imports ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { // Bootstrapping test environment utilruntime.Must(AddToScheme(scheme.Scheme)) - testEnv = helpers.NewTestEnvironment() + env = envtest.New() go func() { - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() // Run tests code := m.Run() // Tearing down the test environment - if err := testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop the envtest: %v", err)) } diff --git a/api/v1alpha3/webhook_test.go b/api/v1alpha3/webhook_test.go index 886b09653c62..66c2e34630be 100644 --- a/api/v1alpha3/webhook_test.go +++ b/api/v1alpha3/webhook_test.go @@ -32,7 +32,7 @@ import ( func TestClusterConversion(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) + ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) g.Expect(err).ToNot(HaveOccurred()) clusterName := fmt.Sprintf("test-cluster-%s", util.RandomString(5)) cluster := &Cluster{ @@ -42,15 +42,15 @@ func TestClusterConversion(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(env.Create(ctx, cluster)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, cluster) } func TestMachineSetConversion(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) + ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) g.Expect(err).ToNot(HaveOccurred()) clusterName := fmt.Sprintf("test-cluster-%s", util.RandomString(5)) @@ -69,15 +69,15 @@ func TestMachineSetConversion(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, machineSet)).To(Succeed()) + g.Expect(env.Create(ctx, machineSet)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, machineSet) } func TestMachineDeploymentConversion(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) + ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) g.Expect(err).ToNot(HaveOccurred()) clusterName := fmt.Sprintf("test-cluster-%s", util.RandomString(5)) @@ -94,9 +94,9 @@ func TestMachineDeploymentConversion(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, machineDeployment)).To(Succeed()) + g.Expect(env.Create(ctx, machineDeployment)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, machineDeployment) } diff --git a/bootstrap/kubeadm/api/v1alpha3/webhook_suite_test.go b/bootstrap/kubeadm/api/v1alpha3/suite_test.go similarity index 79% rename from bootstrap/kubeadm/api/v1alpha3/webhook_suite_test.go rename to bootstrap/kubeadm/api/v1alpha3/suite_test.go 
index e94f87abc796..2f899e906291 100644 --- a/bootstrap/kubeadm/api/v1alpha3/webhook_suite_test.go +++ b/bootstrap/kubeadm/api/v1alpha3/suite_test.go @@ -23,32 +23,32 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/cluster-api/test/helpers" + "sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" // +kubebuilder:scaffold:imports ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { // Bootstrapping test environment utilruntime.Must(AddToScheme(scheme.Scheme)) - testEnv = helpers.NewTestEnvironment() + env = envtest.New() go func() { - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() // Run tests code := m.Run() // Tearing down the test environment - if err := testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop the envtest: %v", err)) } diff --git a/bootstrap/kubeadm/api/v1alpha3/webhook_test.go b/bootstrap/kubeadm/api/v1alpha3/webhook_test.go index 6e0c00be55ed..075071463fc6 100644 --- a/bootstrap/kubeadm/api/v1alpha3/webhook_test.go +++ b/bootstrap/kubeadm/api/v1alpha3/webhook_test.go @@ -32,7 +32,7 @@ import ( func TestKubeadmConfigConversion(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) + ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) g.Expect(err).ToNot(HaveOccurred()) kubeadmConfigName := fmt.Sprintf("test-kubeadmconfig-%s", util.RandomString(5)) kubeadmConfig := &KubeadmConfig{ @@ -43,15 +43,15 @@ func TestKubeadmConfigConversion(t *testing.T) { Spec: fakeKubeadmConfigSpec, } - g.Expect(testEnv.Create(ctx, kubeadmConfig)).To(Succeed()) + g.Expect(env.Create(ctx, kubeadmConfig)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, kubeadmConfig) } func TestKubeadmConfigTemplateConversion(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) + ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) g.Expect(err).ToNot(HaveOccurred()) kubeadmConfigTemplateName := fmt.Sprintf("test-kubeadmconfigtemplate-%s", util.RandomString(5)) kubeadmConfigTemplate := &KubeadmConfigTemplate{ @@ -66,9 +66,9 @@ func TestKubeadmConfigTemplateConversion(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, kubeadmConfigTemplate)).To(Succeed()) + g.Expect(env.Create(ctx, kubeadmConfigTemplate)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, kubeadmConfigTemplate) } diff --git a/bootstrap/kubeadm/controllers/kubeadmconfig_controller_reconciler_test.go b/bootstrap/kubeadm/controllers/kubeadmconfig_controller_reconciler_test.go index ae25be5d2cd5..a7b875f67028 100644 --- a/bootstrap/kubeadm/controllers/kubeadmconfig_controller_reconciler_test.go +++ b/bootstrap/kubeadm/controllers/kubeadmconfig_controller_reconciler_test.go @@ -33,16 +33,16 @@ func 
TestKubeadmConfigReconciler(t *testing.T) { g := NewWithT(t) cluster := newCluster("cluster1") - g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(env.Create(ctx, cluster)).To(Succeed()) machine := newMachine(cluster, "my-machine") - g.Expect(testEnv.Create(ctx, machine)).To(Succeed()) + g.Expect(env.Create(ctx, machine)).To(Succeed()) config := newKubeadmConfig(machine, "my-machine-config") - g.Expect(testEnv.Create(ctx, config)).To(Succeed()) + g.Expect(env.Create(ctx, config)).To(Succeed()) reconciler := KubeadmConfigReconciler{ - Client: testEnv, + Client: env, } t.Log("Calling reconcile should requeue") result, err := reconciler.Reconcile(ctx, ctrl.Request{ diff --git a/bootstrap/kubeadm/controllers/suite_test.go b/bootstrap/kubeadm/controllers/suite_test.go index 2e6462577fa2..4d44f19d4f43 100644 --- a/bootstrap/kubeadm/controllers/suite_test.go +++ b/bootstrap/kubeadm/controllers/suite_test.go @@ -21,33 +21,33 @@ import ( "os" "testing" - "sigs.k8s.io/cluster-api/test/helpers" + "sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" // +kubebuilder:scaffold:imports ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { fmt.Println("Creating new test environment") - testEnv = helpers.NewTestEnvironment() + env = envtest.New() go func() { fmt.Println("Starting the manager") - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() code := m.Run() fmt.Println("Tearing down test suite") - if err := testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop envtest: %v", err)) } diff --git a/bootstrap/util/suite_test.go b/bootstrap/util/suite_test.go index da7bea3b5359..c774b9148f88 100644 --- a/bootstrap/util/suite_test.go +++ b/bootstrap/util/suite_test.go @@ -21,31 +21,31 @@ import ( "os" "testing" - "sigs.k8s.io/cluster-api/test/helpers" + "sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" // +kubebuilder:scaffold:imports ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { // Bootstrapping test environment - testEnv = helpers.NewTestEnvironment() + env = envtest.New() go func() { - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() // Run tests code := m.Run() // Tearing down the test environment - if err := testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop the envtest: %v", err)) } diff --git a/controllers/cluster_controller_test.go b/controllers/cluster_controller_test.go index 8963fd575672..0b1cf8e54917 100644 --- a/controllers/cluster_controller_test.go +++ b/controllers/cluster_controller_test.go @@ -47,16 +47,16 @@ func TestClusterReconciler(t *testing.T) { } // Create the Cluster object and expect the Reconcile and Deployment to be created - g.Expect(testEnv.Create(ctx, instance)).To(Succeed()) + g.Expect(env.Create(ctx, instance)).To(Succeed()) 
key := client.ObjectKey{Namespace: instance.Namespace, Name: instance.Name} defer func() { - err := testEnv.Delete(ctx, instance) + err := env.Delete(ctx, instance) g.Expect(err).NotTo(HaveOccurred()) }() // Make sure the Cluster exists. g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, instance); err != nil { + if err := env.Get(ctx, key, instance); err != nil { return false } return len(instance.Finalizers) > 0 @@ -73,16 +73,16 @@ func TestClusterReconciler(t *testing.T) { Namespace: "default", }, } - g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(env.Create(ctx, cluster)).To(Succeed()) key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace} defer func() { - err := testEnv.Delete(ctx, cluster) + err := env.Delete(ctx, cluster) g.Expect(err).NotTo(HaveOccurred()) }() // Wait for reconciliation to happen. g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, cluster); err != nil { + if err := env.Get(ctx, key, cluster); err != nil { return false } return len(cluster.Finalizers) > 0 @@ -90,7 +90,7 @@ func TestClusterReconciler(t *testing.T) { // Patch g.Eventually(func() bool { - ph, err := patch.NewHelper(cluster, testEnv) + ph, err := patch.NewHelper(cluster, env) g.Expect(err).NotTo(HaveOccurred()) cluster.Spec.InfrastructureRef = &corev1.ObjectReference{Name: "test"} cluster.Spec.ControlPlaneRef = &corev1.ObjectReference{Name: "test-too"} @@ -101,7 +101,7 @@ func TestClusterReconciler(t *testing.T) { // Assertions g.Eventually(func() bool { instance := &clusterv1.Cluster{} - if err := testEnv.Get(ctx, key, instance); err != nil { + if err := env.Get(ctx, key, instance); err != nil { return false } return instance.Spec.InfrastructureRef != nil && @@ -119,16 +119,16 @@ func TestClusterReconciler(t *testing.T) { Namespace: "default", }, } - g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(env.Create(ctx, cluster)).To(Succeed()) key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace} defer func() { - err := testEnv.Delete(ctx, cluster) + err := env.Delete(ctx, cluster) g.Expect(err).NotTo(HaveOccurred()) }() // Wait for reconciliation to happen. g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, cluster); err != nil { + if err := env.Get(ctx, key, cluster); err != nil { return false } return len(cluster.Finalizers) > 0 @@ -136,7 +136,7 @@ func TestClusterReconciler(t *testing.T) { // Patch g.Eventually(func() bool { - ph, err := patch.NewHelper(cluster, testEnv) + ph, err := patch.NewHelper(cluster, env) g.Expect(err).NotTo(HaveOccurred()) cluster.Status.InfrastructureReady = true g.Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).To(Succeed()) @@ -146,7 +146,7 @@ func TestClusterReconciler(t *testing.T) { // Assertions g.Eventually(func() bool { instance := &clusterv1.Cluster{} - if err := testEnv.Get(ctx, key, instance); err != nil { + if err := env.Get(ctx, key, instance); err != nil { return false } return instance.Status.InfrastructureReady @@ -164,16 +164,16 @@ func TestClusterReconciler(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(env.Create(ctx, cluster)).To(Succeed()) key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace} defer func() { - err := testEnv.Delete(ctx, cluster) + err := env.Delete(ctx, cluster) g.Expect(err).NotTo(HaveOccurred()) }() // Wait for reconciliation to happen. 
g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, cluster); err != nil { + if err := env.Get(ctx, key, cluster); err != nil { return false } return len(cluster.Finalizers) > 0 @@ -181,7 +181,7 @@ func TestClusterReconciler(t *testing.T) { // Patch g.Eventually(func() bool { - ph, err := patch.NewHelper(cluster, testEnv) + ph, err := patch.NewHelper(cluster, env) g.Expect(err).NotTo(HaveOccurred()) cluster.Status.InfrastructureReady = true cluster.Spec.InfrastructureRef = &corev1.ObjectReference{Name: "test"} @@ -192,7 +192,7 @@ func TestClusterReconciler(t *testing.T) { // Assertions g.Eventually(func() bool { instance := &clusterv1.Cluster{} - if err := testEnv.Get(ctx, key, instance); err != nil { + if err := env.Get(ctx, key, instance); err != nil { return false } return instance.Status.InfrastructureReady && @@ -211,16 +211,16 @@ func TestClusterReconciler(t *testing.T) { Namespace: "default", }, } - g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(env.Create(ctx, cluster)).To(Succeed()) key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace} defer func() { - err := testEnv.Delete(ctx, cluster) + err := env.Delete(ctx, cluster) g.Expect(err).NotTo(HaveOccurred()) }() // Wait for reconciliation to happen. g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, cluster); err != nil { + if err := env.Get(ctx, key, cluster); err != nil { return false } return len(cluster.Finalizers) > 0 @@ -228,7 +228,7 @@ func TestClusterReconciler(t *testing.T) { // Remove finalizers g.Eventually(func() bool { - ph, err := patch.NewHelper(cluster, testEnv) + ph, err := patch.NewHelper(cluster, env) g.Expect(err).NotTo(HaveOccurred()) cluster.SetFinalizers([]string{}) g.Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).To(Succeed()) @@ -240,7 +240,7 @@ func TestClusterReconciler(t *testing.T) { // Check finalizers are re-applied g.Eventually(func() []string { instance := &clusterv1.Cluster{} - if err := testEnv.Get(ctx, key, instance); err != nil { + if err := env.Get(ctx, key, instance); err != nil { return []string{"not-empty"} } return instance.Finalizers @@ -257,17 +257,17 @@ func TestClusterReconciler(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(env.Create(ctx, cluster)).To(Succeed()) key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace} defer func() { - err := testEnv.Delete(ctx, cluster) + err := env.Delete(ctx, cluster) g.Expect(err).NotTo(HaveOccurred()) }() - g.Expect(testEnv.CreateKubeconfigSecret(ctx, cluster)).To(Succeed()) + g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed()) // Wait for reconciliation to happen. 
g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, cluster); err != nil { + if err := env.Get(ctx, key, cluster); err != nil { return false } return len(cluster.Finalizers) > 0 @@ -284,7 +284,7 @@ func TestClusterReconciler(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, node)).To(Succeed()) + g.Expect(env.Create(ctx, node)).To(Succeed()) machine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ @@ -303,10 +303,10 @@ func TestClusterReconciler(t *testing.T) { }, } machine.Spec.Bootstrap.DataSecretName = pointer.StringPtr("test6-bootstrapdata") - g.Expect(testEnv.Create(ctx, machine)).To(Succeed()) + g.Expect(env.Create(ctx, machine)).To(Succeed()) key = client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace} defer func() { - err := testEnv.Delete(ctx, machine) + err := env.Delete(ctx, machine) g.Expect(err).NotTo(HaveOccurred()) }() @@ -318,7 +318,7 @@ func TestClusterReconciler(t *testing.T) { // we continue to see test timeouts here, that will likely point to something else being the problem, but // I've yet to determine any other possibility for the test flakes. g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, machine); err != nil { + if err := env.Get(ctx, key, machine); err != nil { return false } return len(machine.Finalizers) > 0 @@ -327,7 +327,7 @@ func TestClusterReconciler(t *testing.T) { // Assertion key = client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace} g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, cluster); err != nil { + if err := env.Get(ctx, key, cluster); err != nil { return false } return conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) diff --git a/controllers/machine_controller_node_labels_test.go b/controllers/machine_controller_node_labels_test.go index d62621d38c35..2d85aecfdd3d 100644 --- a/controllers/machine_controller_node_labels_test.go +++ b/controllers/machine_controller_node_labels_test.go @@ -39,7 +39,7 @@ import ( func TestReconcileInterruptibleNodeLabel(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, "test-interruptible-node-label") + ns, err := env.CreateNamespace(ctx, "test-interruptible-node-label") g.Expect(err).ToNot(HaveOccurred()) infraMachine := &unstructured.Unstructured{ @@ -97,24 +97,24 @@ func TestReconcileInterruptibleNodeLabel(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) - g.Expect(testEnv.Create(ctx, node)).To(Succeed()) - g.Expect(testEnv.Create(ctx, infraMachine)).To(Succeed()) - g.Expect(testEnv.Create(ctx, machine)).To(Succeed()) + g.Expect(env.Create(ctx, cluster)).To(Succeed()) + g.Expect(env.Create(ctx, node)).To(Succeed()) + g.Expect(env.Create(ctx, infraMachine)).To(Succeed()) + g.Expect(env.Create(ctx, machine)).To(Succeed()) // Patch infra machine status - patchHelper, err := patch.NewHelper(infraMachine, testEnv) + patchHelper, err := patch.NewHelper(infraMachine, env) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "interruptible")).To(Succeed()) g.Expect(patchHelper.Patch(ctx, infraMachine, patch.WithStatusObservedGeneration{})).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, node, infraMachine, machine) r := &MachineReconciler{ - Client: testEnv.Client, - Tracker: remote.NewTestClusterCacheTracker(log.NullLogger{}, testEnv.Client, scheme.Scheme, client.ObjectKey{Name: cluster.Name, Namespace: 
cluster.Namespace}), + Client: env.Client, + Tracker: remote.NewTestClusterCacheTracker(log.NullLogger{}, env.Client, scheme.Scheme, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}), recorder: record.NewFakeRecorder(32), } @@ -124,7 +124,7 @@ func TestReconcileInterruptibleNodeLabel(t *testing.T) { // Check if node gets interruptible label g.Eventually(func() bool { updatedNode := &corev1.Node{} - err := testEnv.Get(ctx, client.ObjectKey{Name: node.Name}, updatedNode) + err := env.Get(ctx, client.ObjectKey{Name: node.Name}, updatedNode) if err != nil { return false } diff --git a/controllers/machine_controller_test.go b/controllers/machine_controller_test.go index 815fe454f439..ac1f4e42f563 100644 --- a/controllers/machine_controller_test.go +++ b/controllers/machine_controller_test.go @@ -43,7 +43,7 @@ import ( func TestWatches(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, "test-machine-watches") + ns, err := env.CreateNamespace(ctx, "test-machine-watches") g.Expect(err).ToNot(HaveOccurred()) infraMachine := &unstructured.Unstructured{ @@ -99,24 +99,24 @@ func TestWatches(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, testCluster)).To(BeNil()) - g.Expect(testEnv.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) - g.Expect(testEnv.Create(ctx, defaultBootstrap)).To(BeNil()) - g.Expect(testEnv.Create(ctx, node)).To(Succeed()) - g.Expect(testEnv.Create(ctx, infraMachine)).To(BeNil()) + g.Expect(env.Create(ctx, testCluster)).To(BeNil()) + g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) + g.Expect(env.Create(ctx, defaultBootstrap)).To(BeNil()) + g.Expect(env.Create(ctx, node)).To(Succeed()) + g.Expect(env.Create(ctx, infraMachine)).To(BeNil()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, testCluster, defaultBootstrap) // Patch infra machine ready - patchHelper, err := patch.NewHelper(infraMachine, testEnv) + patchHelper, err := patch.NewHelper(infraMachine, env) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed()) g.Expect(patchHelper.Patch(ctx, infraMachine, patch.WithStatusObservedGeneration{})).To(Succeed()) // Patch bootstrap ready - patchHelper, err = patch.NewHelper(defaultBootstrap, testEnv) + patchHelper, err = patch.NewHelper(defaultBootstrap, env) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, true, "status", "ready")).To(Succeed()) g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, "secretData", "status", "dataSecretName")).To(Succeed()) @@ -146,30 +146,30 @@ func TestWatches(t *testing.T) { }}, } - g.Expect(testEnv.Create(ctx, machine)).To(BeNil()) + g.Expect(env.Create(ctx, machine)).To(BeNil()) defer func() { - g.Expect(testEnv.Cleanup(ctx, machine)).To(Succeed()) + g.Expect(env.Cleanup(ctx, machine)).To(Succeed()) }() // Wait for reconciliation to happen. // Since infra and bootstrap objects are ready, a nodeRef will be assigned during node reconciliation. key := client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace} g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, machine); err != nil { + if err := env.Get(ctx, key, machine); err != nil { return false } return machine.Status.NodeRef != nil }, timeout).Should(BeTrue()) // Node deletion will trigger node watchers and a request will be added to the queue. 
- g.Expect(testEnv.Delete(ctx, node)).To(Succeed()) + g.Expect(env.Delete(ctx, node)).To(Succeed()) // TODO: Once conditions are in place, check if node deletion triggered a reconcile. // Delete infra machine, external tracker will trigger reconcile // and machine Status.FailureReason should be non-nil after reconcileInfrastructure - g.Expect(testEnv.Delete(ctx, infraMachine)).To(Succeed()) + g.Expect(env.Delete(ctx, infraMachine)).To(Succeed()) g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, machine); err != nil { + if err := env.Get(ctx, key, machine); err != nil { return false } return machine.Status.FailureMessage != nil @@ -246,12 +246,12 @@ func TestMachine_Reconcile(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, testCluster)).To(BeNil()) - g.Expect(testEnv.Create(ctx, infraMachine)).To(BeNil()) - g.Expect(testEnv.Create(ctx, defaultBootstrap)).To(BeNil()) + g.Expect(env.Create(ctx, testCluster)).To(BeNil()) + g.Expect(env.Create(ctx, infraMachine)).To(BeNil()) + g.Expect(env.Create(ctx, defaultBootstrap)).To(BeNil()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(testCluster) machine := &clusterv1.Machine{ @@ -280,13 +280,13 @@ func TestMachine_Reconcile(t *testing.T) { }, }, } - g.Expect(testEnv.Create(ctx, machine)).To(BeNil()) + g.Expect(env.Create(ctx, machine)).To(BeNil()) key := client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace} // Wait for reconciliation to happen when infra and bootstrap objects are not ready. g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, machine); err != nil { + if err := env.Get(ctx, key, machine); err != nil { return false } return len(machine.Finalizers) > 0 @@ -295,16 +295,16 @@ func TestMachine_Reconcile(t *testing.T) { // Set bootstrap ready. bootstrapPatch := client.MergeFrom(defaultBootstrap.DeepCopy()) g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, true, "status", "ready")).NotTo(HaveOccurred()) - g.Expect(testEnv.Status().Patch(ctx, defaultBootstrap, bootstrapPatch)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, defaultBootstrap, bootstrapPatch)).To(Succeed()) // Set infrastructure ready. infraMachinePatch := client.MergeFrom(infraMachine.DeepCopy()) g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed()) - g.Expect(testEnv.Status().Patch(ctx, infraMachine, infraMachinePatch)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, infraMachine, infraMachinePatch)).To(Succeed()) // Wait for Machine Ready Condition to become True. g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, machine); err != nil { + if err := env.Get(ctx, key, machine); err != nil { return false } if conditions.Has(machine, clusterv1.InfrastructureReadyCondition) != true { @@ -314,10 +314,10 @@ func TestMachine_Reconcile(t *testing.T) { return readyCondition.Status == corev1.ConditionTrue }, timeout).Should(BeTrue()) - g.Expect(testEnv.Delete(ctx, machine)).NotTo(HaveOccurred()) + g.Expect(env.Delete(ctx, machine)).NotTo(HaveOccurred()) // Wait for Machine to be deleted. g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, machine); err != nil { + if err := env.Get(ctx, key, machine); err != nil { if apierrors.IsNotFound(err) { return true } @@ -328,7 +328,7 @@ func TestMachine_Reconcile(t *testing.T) { // Check if Machine deletion successfully deleted infrastructure external reference. 
keyInfra := client.ObjectKey{Name: infraMachine.GetName(), Namespace: infraMachine.GetNamespace()} g.Eventually(func() bool { - if err := testEnv.Get(ctx, keyInfra, infraMachine); err != nil { + if err := env.Get(ctx, keyInfra, infraMachine); err != nil { if apierrors.IsNotFound(err) { return true } @@ -339,7 +339,7 @@ func TestMachine_Reconcile(t *testing.T) { // Check if Machine deletion successfully deleted bootstrap external reference. keyBootstrap := client.ObjectKey{Name: defaultBootstrap.GetName(), Namespace: defaultBootstrap.GetNamespace()} g.Eventually(func() bool { - if err := testEnv.Get(ctx, keyBootstrap, defaultBootstrap); err != nil { + if err := env.Get(ctx, keyBootstrap, defaultBootstrap); err != nil { if apierrors.IsNotFound(err) { return true } @@ -1623,7 +1623,7 @@ func TestIsDeleteNodeAllowed(t *testing.T) { func TestNodeToMachine(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, "test-node-to-machine") + ns, err := env.CreateNamespace(ctx, "test-node-to-machine") g.Expect(err).ToNot(HaveOccurred()) // Set up cluster, machines and nodes to test against. @@ -1711,32 +1711,32 @@ func TestNodeToMachine(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, testCluster)).To(BeNil()) - g.Expect(testEnv.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) - g.Expect(testEnv.Create(ctx, defaultBootstrap)).To(BeNil()) - g.Expect(testEnv.Create(ctx, targetNode)).To(Succeed()) - g.Expect(testEnv.Create(ctx, randomNode)).To(Succeed()) - g.Expect(testEnv.Create(ctx, infraMachine)).To(BeNil()) - g.Expect(testEnv.Create(ctx, infraMachine2)).To(BeNil()) + g.Expect(env.Create(ctx, testCluster)).To(BeNil()) + g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) + g.Expect(env.Create(ctx, defaultBootstrap)).To(BeNil()) + g.Expect(env.Create(ctx, targetNode)).To(Succeed()) + g.Expect(env.Create(ctx, randomNode)).To(Succeed()) + g.Expect(env.Create(ctx, infraMachine)).To(BeNil()) + g.Expect(env.Create(ctx, infraMachine2)).To(BeNil()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, testCluster, defaultBootstrap) // Patch infra expectedMachine ready - patchHelper, err := patch.NewHelper(infraMachine, testEnv) + patchHelper, err := patch.NewHelper(infraMachine, env) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed()) g.Expect(patchHelper.Patch(ctx, infraMachine, patch.WithStatusObservedGeneration{})).To(Succeed()) // Patch infra randomMachine ready - patchHelper, err = patch.NewHelper(infraMachine2, testEnv) + patchHelper, err = patch.NewHelper(infraMachine2, env) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(unstructured.SetNestedField(infraMachine2.Object, true, "status", "ready")).To(Succeed()) g.Expect(patchHelper.Patch(ctx, infraMachine2, patch.WithStatusObservedGeneration{})).To(Succeed()) // Patch bootstrap ready - patchHelper, err = patch.NewHelper(defaultBootstrap, testEnv) + patchHelper, err = patch.NewHelper(defaultBootstrap, env) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, true, "status", "ready")).To(Succeed()) g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, "secretData", "status", "dataSecretName")).To(Succeed()) @@ -1766,16 +1766,16 @@ func TestNodeToMachine(t *testing.T) { }}, } - g.Expect(testEnv.Create(ctx, expectedMachine)).To(BeNil()) + g.Expect(env.Create(ctx, 
expectedMachine)).To(BeNil()) defer func() { - g.Expect(testEnv.Cleanup(ctx, expectedMachine)).To(Succeed()) + g.Expect(env.Cleanup(ctx, expectedMachine)).To(Succeed()) }() // Wait for reconciliation to happen. // Since infra and bootstrap objects are ready, a nodeRef will be assigned during node reconciliation. key := client.ObjectKey{Name: expectedMachine.Name, Namespace: expectedMachine.Namespace} g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, expectedMachine); err != nil { + if err := env.Get(ctx, key, expectedMachine); err != nil { return false } return expectedMachine.Status.NodeRef != nil @@ -1805,16 +1805,16 @@ func TestNodeToMachine(t *testing.T) { }}, } - g.Expect(testEnv.Create(ctx, randomMachine)).To(BeNil()) + g.Expect(env.Create(ctx, randomMachine)).To(BeNil()) defer func() { - g.Expect(testEnv.Cleanup(ctx, randomMachine)).To(Succeed()) + g.Expect(env.Cleanup(ctx, randomMachine)).To(Succeed()) }() // Wait for reconciliation to happen. // Since infra and bootstrap objects are ready, a nodeRef will be assigned during node reconciliation. key = client.ObjectKey{Name: randomMachine.Name, Namespace: randomMachine.Namespace} g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, randomMachine); err != nil { + if err := env.Get(ctx, key, randomMachine); err != nil { return false } return randomMachine.Status.NodeRef != nil @@ -1871,7 +1871,7 @@ func TestNodeToMachine(t *testing.T) { } r := &MachineReconciler{ - Client: testEnv, + Client: env, } for _, node := range fakeNodes { request := r.nodeToMachine(node) diff --git a/controllers/machinedeployment_controller_test.go b/controllers/machinedeployment_controller_test.go index d6c60cc9fd1e..3e4b1118fdc5 100644 --- a/controllers/machinedeployment_controller_test.go +++ b/controllers/machinedeployment_controller_test.go @@ -43,18 +43,18 @@ func TestMachineDeploymentReconciler(t *testing.T) { setup := func(t *testing.T, g *WithT) { t.Log("Creating the namespace") - g.Expect(testEnv.Create(ctx, namespace)).To(Succeed()) + g.Expect(env.Create(ctx, namespace)).To(Succeed()) t.Log("Creating the Cluster") - g.Expect(testEnv.Create(ctx, testCluster)).To(Succeed()) + g.Expect(env.Create(ctx, testCluster)).To(Succeed()) t.Log("Creating the Cluster Kubeconfig Secret") - g.Expect(testEnv.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) + g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) } teardown := func(t *testing.T, g *WithT) { t.Log("Deleting the Cluster") - g.Expect(testEnv.Delete(ctx, testCluster)).To(Succeed()) + g.Expect(env.Delete(ctx, testCluster)).To(Succeed()) t.Log("Deleting the namespace") - g.Expect(testEnv.Delete(ctx, namespace)).To(Succeed()) + g.Expect(env.Delete(ctx, namespace)).To(Succeed()) } t.Run("Should reconcile a MachineDeployment", func(t *testing.T) { @@ -138,20 +138,20 @@ func TestMachineDeploymentReconciler(t *testing.T) { infraTmpl.SetName("md-template") infraTmpl.SetNamespace(namespace.Name) t.Log("Creating the infrastructure template") - g.Expect(testEnv.Create(ctx, infraTmpl)).To(Succeed()) + g.Expect(env.Create(ctx, infraTmpl)).To(Succeed()) // Create the MachineDeployment object and expect Reconcile to be called. 
t.Log("Creating the MachineDeployment") - g.Expect(testEnv.Create(ctx, deployment)).To(Succeed()) + g.Expect(env.Create(ctx, deployment)).To(Succeed()) defer func() { t.Log("Deleting the MachineDeployment") - g.Expect(testEnv.Delete(ctx, deployment)).To(Succeed()) + g.Expect(env.Delete(ctx, deployment)).To(Succeed()) }() t.Log("Verifying the MachineDeployment has a cluster label and ownerRef") g.Eventually(func() bool { key := client.ObjectKey{Name: deployment.Name, Namespace: deployment.Namespace} - if err := testEnv.Get(ctx, key, deployment); err != nil { + if err := env.Get(ctx, key, deployment); err != nil { return false } if len(deployment.Labels) == 0 || deployment.Labels[clusterv1.ClusterLabelName] != testCluster.Name { @@ -167,7 +167,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { t.Log("Verifying the MachineSet was created") machineSets := &clusterv1.MachineSetList{} g.Eventually(func() int { - if err := testEnv.List(ctx, machineSets, msListOpts...); err != nil { + if err := env.List(ctx, machineSets, msListOpts...); err != nil { return -1 } return len(machineSets.Items) @@ -178,7 +178,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { t.Log("Verifying the linked infrastructure template has a cluster owner reference") g.Eventually(func() bool { - obj, err := external.Get(ctx, testEnv, &deployment.Spec.Template.Spec.InfrastructureRef, deployment.Namespace) + obj, err := external.Get(ctx, env, &deployment.Spec.Template.Spec.InfrastructureRef, deployment.Namespace) if err != nil { return false } @@ -195,7 +195,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { t.Log("Verify expected number of machines are created") machines := &clusterv1.MachineList{} g.Eventually(func() int { - if err := testEnv.List(ctx, machines, client.InNamespace(namespace.Name)); err != nil { + if err := env.List(ctx, machines, client.InNamespace(namespace.Name)); err != nil { return -1 } return len(machines.Items) @@ -215,9 +215,9 @@ func TestMachineDeploymentReconciler(t *testing.T) { // Delete firstMachineSet and expect Reconcile to be called to replace it. 
// t.Log("Deleting the initial MachineSet") - g.Expect(testEnv.Delete(ctx, &firstMachineSet)).To(Succeed()) + g.Expect(env.Delete(ctx, &firstMachineSet)).To(Succeed()) g.Eventually(func() bool { - if err := testEnv.List(ctx, machineSets, msListOpts...); err != nil { + if err := env.List(ctx, machineSets, msListOpts...); err != nil { return false } for _, ms := range machineSets.Items { @@ -234,10 +234,10 @@ func TestMachineDeploymentReconciler(t *testing.T) { secondMachineSet := machineSets.Items[0] t.Log("Scaling the MachineDeployment to 3 replicas") modifyFunc := func(d *clusterv1.MachineDeployment) { d.Spec.Replicas = pointer.Int32Ptr(3) } - g.Expect(updateMachineDeployment(ctx, testEnv, deployment, modifyFunc)).To(Succeed()) + g.Expect(updateMachineDeployment(ctx, env, deployment, modifyFunc)).To(Succeed()) g.Eventually(func() int { key := client.ObjectKey{Name: secondMachineSet.Name, Namespace: secondMachineSet.Namespace} - if err := testEnv.Get(ctx, key, &secondMachineSet); err != nil { + if err := env.Get(ctx, key, &secondMachineSet); err != nil { return -1 } return int(*secondMachineSet.Spec.Replicas) @@ -248,9 +248,9 @@ func TestMachineDeploymentReconciler(t *testing.T) { // t.Log("Setting a label on the MachineDeployment") modifyFunc = func(d *clusterv1.MachineDeployment) { d.Spec.Template.Labels["updated"] = "true" } - g.Expect(updateMachineDeployment(ctx, testEnv, deployment, modifyFunc)).To(Succeed()) + g.Expect(updateMachineDeployment(ctx, env, deployment, modifyFunc)).To(Succeed()) g.Eventually(func() int { - if err := testEnv.List(ctx, machineSets, msListOpts...); err != nil { + if err := env.List(ctx, machineSets, msListOpts...); err != nil { return -1 } return len(machineSets.Items) @@ -260,9 +260,9 @@ func TestMachineDeploymentReconciler(t *testing.T) { modifyFunc = func(d *clusterv1.MachineDeployment) { d.Spec.Strategy.RollingUpdate.DeletePolicy = pointer.StringPtr("Newest") } - g.Expect(updateMachineDeployment(ctx, testEnv, deployment, modifyFunc)).To(Succeed()) + g.Expect(updateMachineDeployment(ctx, env, deployment, modifyFunc)).To(Succeed()) g.Eventually(func() string { - if err := testEnv.List(ctx, machineSets, msListOpts...); err != nil { + if err := env.List(ctx, machineSets, msListOpts...); err != nil { return "" } return machineSets.Items[0].Spec.DeletePolicy @@ -274,7 +274,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { // Verify that all the MachineSets have the expected OwnerRef. t.Log("Verifying MachineSet owner references") g.Eventually(func() bool { - if err := testEnv.List(ctx, machineSets, msListOpts...); err != nil { + if err := env.List(ctx, machineSets, msListOpts...); err != nil { return false } for i := 0; i < len(machineSets.Items); i++ { @@ -302,7 +302,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { // Set the all non-deleted machines as ready with a NodeRef, so the MachineSet controller can proceed // to properly set AvailableReplicas. 
foundMachines := &clusterv1.MachineList{} - g.Expect(testEnv.List(ctx, foundMachines, client.InNamespace(namespace.Name))).To(Succeed()) + g.Expect(env.List(ctx, foundMachines, client.InNamespace(namespace.Name))).To(Succeed()) for i := 0; i < len(foundMachines.Items); i++ { m := foundMachines.Items[i] // Skip over deleted Machines @@ -317,7 +317,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { fakeMachineNodeRef(&m, providerID, g) } - if err := testEnv.List(ctx, machineSets, msListOpts...); err != nil { + if err := env.List(ctx, machineSets, msListOpts...); err != nil { return -1 } return len(machineSets.Items) @@ -341,12 +341,12 @@ func TestMachineDeploymentReconciler(t *testing.T) { d.Spec.Selector.MatchLabels = newLabels d.Spec.Template.Labels = newLabels } - g.Expect(updateMachineDeployment(ctx, testEnv, deployment, modifyFunc)).To(Succeed()) + g.Expect(updateMachineDeployment(ctx, env, deployment, modifyFunc)).To(Succeed()) t.Log("Verifying if a new MachineSet with updated labels are created") g.Eventually(func() int { listOpts := client.MatchingLabels(newLabels) - if err := testEnv.List(ctx, machineSets, listOpts); err != nil { + if err := env.List(ctx, machineSets, listOpts); err != nil { return -1 } return len(machineSets.Items) @@ -358,7 +358,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { // Set the all non-deleted machines as ready with a NodeRef, so the MachineSet controller can proceed // to properly set AvailableReplicas. foundMachines := &clusterv1.MachineList{} - g.Expect(testEnv.List(ctx, foundMachines, client.InNamespace(namespace.Name))).To(Succeed()) + g.Expect(env.List(ctx, foundMachines, client.InNamespace(namespace.Name))).To(Succeed()) for i := 0; i < len(foundMachines.Items); i++ { m := foundMachines.Items[i] if !m.DeletionTimestamp.IsZero() { @@ -373,7 +373,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { } listOpts := client.MatchingLabels(newLabels) - if err := testEnv.List(ctx, machineSets, listOpts); err != nil { + if err := env.List(ctx, machineSets, listOpts); err != nil { return false } return machineSets.Items[0].Status.Replicas == *deployment.Spec.Replicas @@ -382,7 +382,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { t.Log("Verifying MachineSets with old labels are deleted") g.Eventually(func() int { listOpts := client.MatchingLabels(oldLabels) - if err := testEnv.List(ctx, machineSets, listOpts); err != nil { + if err := env.List(ctx, machineSets, listOpts); err != nil { return -1 } diff --git a/controllers/machinehealthcheck_controller_test.go b/controllers/machinehealthcheck_controller_test.go index 64545d9ba0bf..97e4b57f86a1 100644 --- a/controllers/machinehealthcheck_controller_test.go +++ b/controllers/machinehealthcheck_controller_test.go @@ -61,13 +61,13 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) mhc.Labels = map[string]string{} - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) g.Eventually(func() map[string]string { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -84,13 +84,13 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { clusterv1.ClusterLabelName: "wrong-cluster", } - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + 
g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) g.Eventually(func() map[string]string { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -107,13 +107,13 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { "extra-label": "1", } - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) g.Eventually(func() map[string]string { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -132,13 +132,13 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) mhc.OwnerReferences = []metav1.OwnerReference{} - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) g.Eventually(func() []metav1.OwnerReference { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { fmt.Printf("error cannot retrieve mhc in ctx: %v", err) return nil @@ -159,13 +159,13 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { {Kind: "Foo", APIVersion: "foo.bar.baz/v1", Name: "Bar", UID: "12345"}, } - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) g.Eventually(func() []metav1.OwnerReference { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -184,9 +184,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines matching the MHC's label selector. @@ -214,7 +214,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. 
g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -238,7 +238,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { g := NewWithT(t) cluster := createNamespaceAndCluster(g) - patchHelper, err := patch.NewHelper(cluster, testEnv.Client) + patchHelper, err := patch.NewHelper(cluster, env.Client) g.Expect(err).To(BeNil()) conditions.MarkFalse(cluster, clusterv1.InfrastructureReadyCondition, "SomeReason", clusterv1.ConditionSeverityError, "") @@ -246,9 +246,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -267,7 +267,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -293,9 +293,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -315,7 +315,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -341,9 +341,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -372,7 +372,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -398,9 +398,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -430,7 +430,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. 
g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -456,9 +456,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -488,7 +488,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -516,9 +516,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { maxUnhealthy := intstr.Parse("40%") mhc.Spec.MaxUnhealthy = &maxUnhealthy - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -547,7 +547,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -572,7 +572,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -590,7 +590,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been remediated. g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -614,9 +614,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { unhealthyRange := "[1-3]" mhc.Spec.UnhealthyRange = &unhealthyRange - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -645,7 +645,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. 
g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -673,9 +673,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { unhealthyRange := "[3-5]" mhc.Spec.UnhealthyRange = &unhealthyRange - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -704,7 +704,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -729,7 +729,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -747,7 +747,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been remediated. g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -769,7 +769,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // After the cluster exists, we have to set the infrastructure ready condition; otherwise, MachineHealthChecks // will never fail when nodeStartupTimeout is exceeded. - patchHelper, err := patch.NewHelper(cluster, testEnv.GetClient()) + patchHelper, err := patch.NewHelper(cluster, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) @@ -778,9 +778,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) mhc.Spec.NodeStartupTimeout = &metav1.Duration{Duration: 5 * time.Hour} - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -809,7 +809,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -831,7 +831,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -849,7 +849,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been remediated. 
g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -874,9 +874,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) mhc.Spec.NodeStartupTimeout = &metav1.Duration{Duration: time.Second} - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -907,7 +907,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the MHC status matches. We have two healthy machines and // one unhealthy. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { fmt.Printf("error retrieving mhc: %v", err) return nil @@ -930,7 +930,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -949,7 +949,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been remediated. g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -973,9 +973,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -996,15 +996,15 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Forcibly remove the last machine's node. g.Eventually(func() bool { nodeToBeRemoved := nodes[2] - if err := testEnv.Delete(ctx, nodeToBeRemoved); err != nil { + if err := env.Delete(ctx, nodeToBeRemoved); err != nil { return apierrors.IsNotFound(err) } - return apierrors.IsNotFound(testEnv.Get(ctx, util.ObjectKey(nodeToBeRemoved), nodeToBeRemoved)) + return apierrors.IsNotFound(env.Get(ctx, util.ObjectKey(nodeToBeRemoved), nodeToBeRemoved)) }).Should(BeTrue()) // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -1026,7 +1026,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. 
g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -1044,7 +1044,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been remediated. g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -1066,9 +1066,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -1088,7 +1088,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -1117,11 +1117,11 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { LastTransitionTime: metav1.NewTime(time.Now().Add(-10 * time.Minute)), }, } - g.Expect(testEnv.Status().Patch(ctx, node, nodePatch)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, node, nodePatch)).To(Succeed()) // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -1142,7 +1142,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -1160,7 +1160,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been marked for remediation g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -1211,7 +1211,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { infraTmpl.SetGenerateName("mhc-ms-template-") infraTmpl.SetNamespace(mhc.Namespace) - g.Expect(testEnv.Create(ctx, infraTmpl)).To(Succeed()) + g.Expect(env.Create(ctx, infraTmpl)).To(Succeed()) machineSet := &clusterv1.MachineSet{ ObjectMeta: metav1.ObjectMeta{ @@ -1241,12 +1241,12 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { }, } machineSet.Default() - g.Expect(testEnv.Create(ctx, machineSet)).To(Succeed()) + g.Expect(env.Create(ctx, machineSet)).To(Succeed()) // Ensure machines have been created. 
g.Eventually(func() int { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -1258,10 +1258,10 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Create the MachineHealthCheck instance. mhc.Spec.NodeStartupTimeout = &metav1.Duration{Duration: time.Second} - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) // defer cleanup for all the objects that have been created defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc, infraTmpl, machineSet) // Pause the MachineSet reconciler to delay the deletion of the @@ -1271,12 +1271,12 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { machineSet.Annotations = map[string]string{ clusterv1.PausedAnnotation: "", } - g.Expect(testEnv.Patch(ctx, machineSet, machineSetPatch)).To(Succeed()) + g.Expect(env.Patch(ctx, machineSet, machineSetPatch)).To(Succeed()) // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -1295,7 +1295,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { var unhealthyMachine *clusterv1.Machine g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -1314,12 +1314,12 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Unpause the MachineSet reconciler. machineSetPatch = client.MergeFrom(machineSet.DeepCopy()) delete(machineSet.Annotations, clusterv1.PausedAnnotation) - g.Expect(testEnv.Patch(ctx, machineSet, machineSetPatch)).To(Succeed()) + g.Expect(env.Patch(ctx, machineSet, machineSetPatch)).To(Succeed()) // Make sure the Machine gets deleted. g.Eventually(func() bool { machine := unhealthyMachine.DeepCopy() - err := testEnv.Get(ctx, util.ObjectKey(unhealthyMachine), machine) + err := env.Get(ctx, util.ObjectKey(unhealthyMachine), machine) return apierrors.IsNotFound(err) || !machine.DeletionTimestamp.IsZero() }, timeout, 100*time.Millisecond).Should(BeTrue()) }) @@ -1332,9 +1332,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -1354,7 +1354,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. 
g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -1377,7 +1377,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { machines[0].Annotations = map[string]string{ clusterv1.PausedAnnotation: "", } - g.Expect(testEnv.Patch(ctx, machines[0], machinePatch)).To(Succeed()) + g.Expect(env.Patch(ctx, machines[0], machinePatch)).To(Succeed()) // Transition the node to unhealthy. node := nodes[0] @@ -1389,11 +1389,11 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { LastTransitionTime: metav1.NewTime(time.Now().Add(-10 * time.Minute)), }, } - g.Expect(testEnv.Status().Patch(ctx, node, nodePatch)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, node, nodePatch)).To(Succeed()) // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -1415,7 +1415,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -1433,7 +1433,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been remediated. g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -1473,7 +1473,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { infraRemediationTmpl.SetAPIVersion("infrastructure.cluster.x-k8s.io/v1alpha3") infraRemediationTmpl.SetGenerateName("remediation-template-name-") infraRemediationTmpl.SetNamespace(cluster.Namespace) - g.Expect(testEnv.Create(ctx, infraRemediationTmpl)).To(Succeed()) + g.Expect(env.Create(ctx, infraRemediationTmpl)).To(Succeed()) remediationTemplate := &corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3", @@ -1483,9 +1483,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) mhc.Spec.RemediationTemplate = remediationTemplate - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc, infraRemediationTmpl) // Healthy nodes and machines. @@ -1505,7 +1505,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -1534,11 +1534,11 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { LastTransitionTime: metav1.NewTime(time.Now().Add(-10 * time.Minute)), }, } - g.Expect(testEnv.Status().Patch(ctx, node, nodePatch)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, node, nodePatch)).To(Succeed()) // Make sure the status matches. 
g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -1560,7 +1560,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -1587,7 +1587,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { Namespace: machines[0].Namespace, Name: machines[0].Name, } - err := testEnv.Get(ctx, key, obj) + err := env.Get(ctx, key, obj) if err != nil { return nil } @@ -1621,7 +1621,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { infraRemediationTmpl.SetAPIVersion("infrastructure.cluster.x-k8s.io/v1alpha3") infraRemediationTmpl.SetGenerateName("remediation-template-name-") infraRemediationTmpl.SetNamespace(cluster.Namespace) - g.Expect(testEnv.Create(ctx, infraRemediationTmpl)).To(Succeed()) + g.Expect(env.Create(ctx, infraRemediationTmpl)).To(Succeed()) remediationTemplate := &corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3", @@ -1631,9 +1631,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) mhc.Spec.RemediationTemplate = remediationTemplate - g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) + g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc, infraRemediationTmpl) // Healthy nodes and machines. @@ -1653,7 +1653,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -1682,11 +1682,11 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { LastTransitionTime: metav1.NewTime(time.Now().Add(-10 * time.Minute)), }, } - g.Expect(testEnv.Status().Patch(ctx, node, nodePatch)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, node, nodePatch)).To(Succeed()) // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -1708,7 +1708,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -1733,11 +1733,11 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { LastTransitionTime: metav1.NewTime(time.Now().Add(-10 * time.Minute)), }, } - g.Expect(testEnv.Status().Patch(ctx, node, nodePatch)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, node, nodePatch)).To(Succeed()) // Make sure the status matches. 
g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) + err := env.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -1759,7 +1759,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv.List(ctx, machines, client.MatchingLabels{ + err := env.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -1786,7 +1786,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { Namespace: machines[0].Namespace, Name: machines[0].Name, } - err := testEnv.Get(ctx, key, obj) + err := env.Get(ctx, key, obj) if err != nil { return nil } @@ -2235,7 +2235,7 @@ func TestGetMaxUnhealthy(t *testing.T) { func ownerReferenceForCluster(ctx context.Context, g *WithT, c *clusterv1.Cluster) metav1.OwnerReference { // Fetch the cluster to populate the UID cc := &clusterv1.Cluster{} - g.Expect(testEnv.GetClient().Get(ctx, util.ObjectKey(c), cc)).To(Succeed()) + g.Expect(env.GetClient().Get(ctx, util.ObjectKey(c), cc)).To(Succeed()) return metav1.OwnerReference{ APIVersion: clusterv1.GroupVersion.String(), @@ -2249,7 +2249,7 @@ func ownerReferenceForCluster(ctx context.Context, g *WithT, c *clusterv1.Cluste // then creates a Cluster and KubeconfigSecret for that cluster in said // namespace. func createNamespaceAndCluster(g *WithT) *clusterv1.Cluster { - ns, err := testEnv.CreateNamespace(ctx, "test-mhc") + ns, err := env.CreateNamespace(ctx, "test-mhc") g.Expect(err).ToNot(HaveOccurred()) cluster := &clusterv1.Cluster{ @@ -2259,23 +2259,23 @@ func createNamespaceAndCluster(g *WithT) *clusterv1.Cluster { }, } - g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(env.Create(ctx, cluster)).To(Succeed()) // Make sure the cluster is in the cache before proceeding g.Eventually(func() error { var cl clusterv1.Cluster - return testEnv.Get(ctx, util.ObjectKey(cluster), &cl) + return env.Get(ctx, util.ObjectKey(cluster), &cl) }, timeout, 100*time.Millisecond).Should(Succeed()) // This is required for MHC to perform checks - patchHelper, err := patch.NewHelper(cluster, testEnv.Client) + patchHelper, err := patch.NewHelper(cluster, env.Client) g.Expect(err).To(BeNil()) conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed()) // Wait for cluster in cache to be updated post-patch g.Eventually(func() bool { - err := testEnv.Get(ctx, util.ObjectKey(cluster), cluster) + err := env.Get(ctx, util.ObjectKey(cluster), cluster) if err != nil { return false } @@ -2283,7 +2283,7 @@ func createNamespaceAndCluster(g *WithT) *clusterv1.Cluster { return conditions.IsTrue(cluster, clusterv1.InfrastructureReadyCondition) }, timeout, 100*time.Millisecond).Should(BeTrue()) - g.Expect(testEnv.CreateKubeconfigSecret(ctx, cluster)).To(Succeed()) + g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed()) return cluster } @@ -2411,7 +2411,7 @@ func createMachinesWithNodes( machine.Labels[clusterv1.MachineControlPlaneLabelName] = "" } infraMachine, providerID := newInfraMachine(machine) - g.Expect(testEnv.Create(ctx, infraMachine)).To(Succeed()) + g.Expect(env.Create(ctx, infraMachine)).To(Succeed()) infraMachines = append(infraMachines, infraMachine) fmt.Printf("inframachine created: %s\n", infraMachine.GetName()) // Patch the status of the InfraMachine and mark it as 
ready. @@ -2419,14 +2419,14 @@ func createMachinesWithNodes( // it separately. infraMachinePatch := client.MergeFrom(infraMachine.DeepCopy()) g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed()) - g.Expect(testEnv.Status().Patch(ctx, infraMachine, infraMachinePatch)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, infraMachine, infraMachinePatch)).To(Succeed()) machine.Spec.InfrastructureRef = corev1.ObjectReference{ APIVersion: infraMachine.GetAPIVersion(), Kind: infraMachine.GetKind(), Name: infraMachine.GetName(), } - g.Expect(testEnv.Create(ctx, machine)).To(Succeed()) + g.Expect(env.Create(ctx, machine)).To(Succeed()) fmt.Printf("machine created: %s\n", machine.GetName()) // Before moving on we want to ensure that the machine has a valid @@ -2436,14 +2436,14 @@ func createMachinesWithNodes( Name: machine.GetName(), Namespace: machine.GetNamespace(), } - err := testEnv.Get(ctx, k, machine) + err := env.Get(ctx, k, machine) if err != nil { return nil } return machine.Status.LastUpdated }, timeout, 100*time.Millisecond).ShouldNot(BeNil()) - machinePatchHelper, err := patch.NewHelper(machine, testEnv.Client) + machinePatchHelper, err := patch.NewHelper(machine, env.Client) g.Expect(err).To(BeNil()) if o.createNodeRefForMachine { @@ -2457,11 +2457,11 @@ func createMachinesWithNodes( }, } - g.Expect(testEnv.Create(ctx, node)).To(Succeed()) + g.Expect(env.Create(ctx, node)).To(Succeed()) fmt.Printf("node created: %s\n", node.GetName()) // Patch node status - nodePatchHelper, err := patch.NewHelper(node, testEnv.Client) + nodePatchHelper, err := patch.NewHelper(node, env.Client) g.Expect(err).To(BeNil()) node.Status.Conditions = []corev1.NodeCondition{ @@ -2504,15 +2504,15 @@ func createMachinesWithNodes( cleanup := func() { fmt.Println("Cleaning up nodes, machines and infra machines.") for _, n := range nodes { - if err := testEnv.Delete(ctx, n); !apierrors.IsNotFound(err) { + if err := env.Delete(ctx, n); !apierrors.IsNotFound(err) { g.Expect(err).NotTo(HaveOccurred()) } } for _, m := range machines { - g.Expect(testEnv.Delete(ctx, m)).To(Succeed()) + g.Expect(env.Delete(ctx, m)).To(Succeed()) } for _, im := range infraMachines { - if err := testEnv.Delete(ctx, im); !apierrors.IsNotFound(err) { + if err := env.Delete(ctx, im); !apierrors.IsNotFound(err) { g.Expect(err).NotTo(HaveOccurred()) } } diff --git a/controllers/machineset_controller_test.go b/controllers/machineset_controller_test.go index cf7db63e531f..f0daddd086e2 100644 --- a/controllers/machineset_controller_test.go +++ b/controllers/machineset_controller_test.go @@ -43,18 +43,18 @@ func TestMachineSetReconciler(t *testing.T) { setup := func(t *testing.T, g *WithT) { t.Log("Creating the namespace") - g.Expect(testEnv.Create(ctx, namespace)).To(Succeed()) + g.Expect(env.Create(ctx, namespace)).To(Succeed()) t.Log("Creating the Cluster") - g.Expect(testEnv.Create(ctx, testCluster)).To(Succeed()) + g.Expect(env.Create(ctx, testCluster)).To(Succeed()) t.Log("Creating the Cluster Kubeconfig Secret") - g.Expect(testEnv.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) + g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) } teardown := func(t *testing.T, g *WithT) { t.Log("Deleting the Cluster") - g.Expect(testEnv.Delete(ctx, testCluster)).To(Succeed()) + g.Expect(env.Delete(ctx, testCluster)).To(Succeed()) t.Log("Deleting the namespace") - g.Expect(testEnv.Delete(ctx, namespace)).To(Succeed()) + g.Expect(env.Delete(ctx, namespace)).To(Succeed()) } t.Run("Should 
reconcile a MachineSet", func(t *testing.T) { @@ -127,7 +127,7 @@ func TestMachineSetReconciler(t *testing.T) { bootstrapTmpl.SetAPIVersion("bootstrap.cluster.x-k8s.io/v1alpha4") bootstrapTmpl.SetName("ms-template") bootstrapTmpl.SetNamespace(namespace.Name) - g.Expect(testEnv.Create(ctx, bootstrapTmpl)).To(Succeed()) + g.Expect(env.Create(ctx, bootstrapTmpl)).To(Succeed()) // Create infrastructure template resource. infraResource := map[string]interface{}{ @@ -153,17 +153,17 @@ func TestMachineSetReconciler(t *testing.T) { infraTmpl.SetAPIVersion("infrastructure.cluster.x-k8s.io/v1alpha4") infraTmpl.SetName("ms-template") infraTmpl.SetNamespace(namespace.Name) - g.Expect(testEnv.Create(ctx, infraTmpl)).To(Succeed()) + g.Expect(env.Create(ctx, infraTmpl)).To(Succeed()) // Create the MachineSet. - g.Expect(testEnv.Create(ctx, instance)).To(Succeed()) + g.Expect(env.Create(ctx, instance)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, instance)).To(Succeed()) + g.Expect(env.Delete(ctx, instance)).To(Succeed()) }() t.Log("Verifying the linked bootstrap template has a cluster owner reference") g.Eventually(func() bool { - obj, err := external.Get(ctx, testEnv, instance.Spec.Template.Spec.Bootstrap.ConfigRef, instance.Namespace) + obj, err := external.Get(ctx, env, instance.Spec.Template.Spec.Bootstrap.ConfigRef, instance.Namespace) if err != nil { return false } @@ -178,7 +178,7 @@ func TestMachineSetReconciler(t *testing.T) { t.Log("Verifying the linked infrastructure template has a cluster owner reference") g.Eventually(func() bool { - obj, err := external.Get(ctx, testEnv, &instance.Spec.Template.Spec.InfrastructureRef, instance.Namespace) + obj, err := external.Get(ctx, env, &instance.Spec.Template.Spec.InfrastructureRef, instance.Namespace) if err != nil { return false } @@ -195,7 +195,7 @@ func TestMachineSetReconciler(t *testing.T) { // Verify that we have 2 replicas. g.Eventually(func() int { - if err := testEnv.List(ctx, machines, client.InNamespace(namespace.Name)); err != nil { + if err := env.List(ctx, machines, client.InNamespace(namespace.Name)); err != nil { return -1 } return len(machines.Items) @@ -206,7 +206,7 @@ func TestMachineSetReconciler(t *testing.T) { infraMachines.SetAPIVersion("infrastructure.cluster.x-k8s.io/v1alpha4") infraMachines.SetKind("InfrastructureMachine") g.Eventually(func() int { - if err := testEnv.List(ctx, infraMachines, client.InNamespace(namespace.Name)); err != nil { + if err := env.List(ctx, infraMachines, client.InNamespace(namespace.Name)); err != nil { return -1 } return len(machines.Items) @@ -225,12 +225,12 @@ func TestMachineSetReconciler(t *testing.T) { // Try to delete 1 machine and check the MachineSet scales back up. machineToBeDeleted := machines.Items[0] - g.Expect(testEnv.Delete(ctx, &machineToBeDeleted)).To(Succeed()) + g.Expect(env.Delete(ctx, &machineToBeDeleted)).To(Succeed()) // Verify that the Machine has been deleted. g.Eventually(func() bool { key := client.ObjectKey{Name: machineToBeDeleted.Name, Namespace: machineToBeDeleted.Namespace} - if err := testEnv.Get(ctx, key, &machineToBeDeleted); apierrors.IsNotFound(err) || !machineToBeDeleted.DeletionTimestamp.IsZero() { + if err := env.Get(ctx, key, &machineToBeDeleted); apierrors.IsNotFound(err) || !machineToBeDeleted.DeletionTimestamp.IsZero() { return true } return false @@ -238,7 +238,7 @@ func TestMachineSetReconciler(t *testing.T) { // Verify that we have 2 replicas. 
g.Eventually(func() (ready int) { - if err := testEnv.List(ctx, machines, client.InNamespace(namespace.Name)); err != nil { + if err := env.List(ctx, machines, client.InNamespace(namespace.Name)); err != nil { return -1 } for _, m := range machines.Items { @@ -269,7 +269,7 @@ func TestMachineSetReconciler(t *testing.T) { // Verify that all Machines are Ready. g.Eventually(func() int32 { key := client.ObjectKey{Name: instance.Name, Namespace: instance.Namespace} - if err := testEnv.Get(ctx, key, instance); err != nil { + if err := env.Get(ctx, key, instance); err != nil { return -1 } return instance.Status.AvailableReplicas diff --git a/controllers/remote/cluster_cache_healthcheck_test.go b/controllers/remote/cluster_cache_healthcheck_test.go index d2de579c5e4a..d4189b5b549a 100644 --- a/controllers/remote/cluster_cache_healthcheck_test.go +++ b/controllers/remote/cluster_cache_healthcheck_test.go @@ -55,7 +55,7 @@ func TestClusterCacheHealthCheck(t *testing.T) { setup := func(t *testing.T, g *WithT) { t.Log("Setting up a new manager") var err error - mgr, err = manager.New(testEnv.Config, manager.Options{ + mgr, err = manager.New(env.Config, manager.Options{ Scheme: scheme.Scheme, MetricsBindAddress: "0", }) @@ -66,7 +66,7 @@ func TestClusterCacheHealthCheck(t *testing.T) { go func() { g.Expect(mgr.Start(mgrContext)).To(Succeed()) }() - <-testEnv.Manager.Elected() + <-env.Manager.Elected() k8sClient = mgr.GetClient() @@ -91,7 +91,7 @@ func TestClusterCacheHealthCheck(t *testing.T) { g.Expect(k8sClient.Status().Update(ctx, testCluster)).To(Succeed()) t.Log("Creating a test cluster kubeconfig") - g.Expect(testEnv.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) + g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) testClusterKey = util.ObjectKey(testCluster) @@ -121,7 +121,7 @@ func TestClusterCacheHealthCheck(t *testing.T) { // TODO(community): Fill in these field names. go cct.healthCheckCluster(ctx, &healthCheckInput{ testClusterKey, - testEnv.Config, + env.Config, testPollInterval, testPollTimeout, testUnhealthyThreshold, @@ -144,7 +144,7 @@ func TestClusterCacheHealthCheck(t *testing.T) { go cct.healthCheckCluster(ctx, &healthCheckInput{ testClusterKey, - testEnv.Config, + env.Config, testPollInterval, testPollTimeout, testUnhealthyThreshold, @@ -170,7 +170,7 @@ func TestClusterCacheHealthCheck(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) l.Close() - config := rest.CopyConfig(testEnv.Config) + config := rest.CopyConfig(env.Config) config.Host = fmt.Sprintf("http://127.0.0.1:%d", l.Addr().(*net.TCPAddr).Port) // TODO(community): Fill in these field names. 
diff --git a/controllers/remote/cluster_cache_reconciler_test.go b/controllers/remote/cluster_cache_reconciler_test.go index d8a2652bdea7..9a9f4d696981 100644 --- a/controllers/remote/cluster_cache_reconciler_test.go +++ b/controllers/remote/cluster_cache_reconciler_test.go @@ -62,7 +62,7 @@ func TestClusterCacheReconciler(t *testing.T) { }, timeout).Should(Succeed()) t.Log("Creating a test cluster kubeconfig") - g.Expect(testEnv.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) + g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) // Check the secret can be fetched from the API server secretKey := client.ObjectKey{Namespace: testNamespace.GetName(), Name: fmt.Sprintf("%s-kubeconfig", testCluster.GetName())} @@ -78,7 +78,7 @@ func TestClusterCacheReconciler(t *testing.T) { setup := func(t *testing.T, g *WithT) { t.Log("Setting up a new manager") var err error - mgr, err = manager.New(testEnv.Config, manager.Options{ + mgr, err = manager.New(env.Config, manager.Options{ Scheme: scheme.Scheme, MetricsBindAddress: "0", }) @@ -101,7 +101,7 @@ func TestClusterCacheReconciler(t *testing.T) { go func() { g.Expect(mgr.Start(mgrContext)).To(Succeed()) }() - <-testEnv.Manager.Elected() + <-env.Manager.Elected() k8sClient = mgr.GetClient() diff --git a/controllers/remote/cluster_cache_tracker_test.go b/controllers/remote/cluster_cache_tracker_test.go index 5bac400260a3..44c6b1ca675e 100644 --- a/controllers/remote/cluster_cache_tracker_test.go +++ b/controllers/remote/cluster_cache_tracker_test.go @@ -64,7 +64,7 @@ func TestClusterCacheTracker(t *testing.T) { setup := func(t *testing.T, g *WithT) { t.Log("Setting up a new manager") var err error - mgr, err = manager.New(testEnv.Config, manager.Options{ + mgr, err = manager.New(env.Config, manager.Options{ Scheme: scheme.Scheme, MetricsBindAddress: "0", }) @@ -81,7 +81,7 @@ func TestClusterCacheTracker(t *testing.T) { go func() { g.Expect(mgr.Start(mgrContext)).To(Succeed()) }() - <-testEnv.Manager.Elected() + <-env.Manager.Elected() k8sClient = mgr.GetClient() @@ -107,7 +107,7 @@ func TestClusterCacheTracker(t *testing.T) { g.Expect(k8sClient.Status().Update(ctx, clusterA)).To(Succeed()) t.Log("Creating a test cluster kubeconfig") - g.Expect(testEnv.CreateKubeconfigSecret(ctx, clusterA)).To(Succeed()) + g.Expect(env.CreateKubeconfigSecret(ctx, clusterA)).To(Succeed()) } teardown := func(t *testing.T, g *WithT) { diff --git a/controllers/remote/suite_test.go b/controllers/remote/suite_test.go index 66ff38c10762..7b69dd953173 100644 --- a/controllers/remote/suite_test.go +++ b/controllers/remote/suite_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - "sigs.k8s.io/cluster-api/test/helpers" + "sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" // +kubebuilder:scaffold:imports ) @@ -32,27 +32,27 @@ const ( ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { fmt.Println("Creating a new test environment") - testEnv = helpers.NewTestEnvironment() + env = envtest.New() go func() { fmt.Println("Starting the test environment manager") - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() code := m.Run() fmt.Println("Stopping the test environment") - if err := 
testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop the test environment: %v", err)) } diff --git a/controllers/schema_test.go b/controllers/schema_test.go index 9f121b48c6dd..ac6bcec533ee 100644 --- a/controllers/schema_test.go +++ b/controllers/schema_test.go @@ -28,7 +28,7 @@ import ( func TestMachineSetScheme(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, "schema-test") + ns, err := env.CreateNamespace(ctx, "schema-test") g.Expect(err).ToNot(HaveOccurred()) testMachineSet := &clusterv1.MachineSet{ @@ -46,10 +46,10 @@ func TestMachineSetScheme(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, testMachineSet)).To(Succeed()) + g.Expect(env.Create(ctx, testMachineSet)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, testMachineSet) g.Expect(testMachineSet.Spec.Replicas).To(Equal(pointer.Int32Ptr(1))) @@ -57,7 +57,7 @@ func TestMachineSetScheme(t *testing.T) { func TestMachineDeploymentScheme(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, "schema-test") + ns, err := env.CreateNamespace(ctx, "schema-test") g.Expect(err).ToNot(HaveOccurred()) testMachineDeployment := &clusterv1.MachineDeployment{ @@ -75,10 +75,10 @@ func TestMachineDeploymentScheme(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, testMachineDeployment)).To(Succeed()) + g.Expect(env.Create(ctx, testMachineDeployment)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, testMachineDeployment) g.Expect(testMachineDeployment.Spec.Replicas).To(Equal(pointer.Int32Ptr(1))) diff --git a/controllers/suite_test.go b/controllers/suite_test.go index f86b0899f074..0b39ac2c0872 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/controllers/remote" - "sigs.k8s.io/cluster-api/test/helpers" + "sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/log" @@ -41,72 +41,72 @@ const ( ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { fmt.Println("Creating a new test environment") - testEnv = helpers.NewTestEnvironment() + env = envtest.New() // Set up a ClusterCacheTracker and ClusterCacheReconciler to provide to controllers // requiring a connection to a remote cluster tracker, err := remote.NewClusterCacheTracker( log.Log, - testEnv.Manager, + env.Manager, ) if err != nil { panic(fmt.Sprintf("unable to create cluster cache tracker: %v", err)) } if err := (&remote.ClusterCacheReconciler{ - Client: testEnv, + Client: env, Log: log.Log, Tracker: tracker, - }).SetupWithManager(ctx, testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { + }).SetupWithManager(ctx, env.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { panic(fmt.Sprintf("Failed to start ClusterCacheReconciler: %v", err)) } if err := (&ClusterReconciler{ - Client: testEnv, - recorder: testEnv.GetEventRecorderFor("cluster-controller"), - }).SetupWithManager(ctx, testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { + 
Client: env, + recorder: env.GetEventRecorderFor("cluster-controller"), + }).SetupWithManager(ctx, env.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { panic(fmt.Sprintf("Failed to start ClusterReconciler: %v", err)) } if err := (&MachineReconciler{ - Client: testEnv, + Client: env, Tracker: tracker, - recorder: testEnv.GetEventRecorderFor("machine-controller"), - }).SetupWithManager(ctx, testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { + recorder: env.GetEventRecorderFor("machine-controller"), + }).SetupWithManager(ctx, env.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { panic(fmt.Sprintf("Failed to start MachineReconciler: %v", err)) } if err := (&MachineSetReconciler{ - Client: testEnv, + Client: env, Tracker: tracker, - recorder: testEnv.GetEventRecorderFor("machineset-controller"), - }).SetupWithManager(ctx, testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { + recorder: env.GetEventRecorderFor("machineset-controller"), + }).SetupWithManager(ctx, env.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { panic(fmt.Sprintf("Failed to start MachineSetReconciler: %v", err)) } if err := (&MachineDeploymentReconciler{ - Client: testEnv, - recorder: testEnv.GetEventRecorderFor("machinedeployment-controller"), - }).SetupWithManager(ctx, testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { + Client: env, + recorder: env.GetEventRecorderFor("machinedeployment-controller"), + }).SetupWithManager(ctx, env.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { panic(fmt.Sprintf("Failed to start MachineDeploymentReconciler: %v", err)) } if err := (&MachineHealthCheckReconciler{ - Client: testEnv, + Client: env, Tracker: tracker, - recorder: testEnv.GetEventRecorderFor("machinehealthcheck-controller"), - }).SetupWithManager(ctx, testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { + recorder: env.GetEventRecorderFor("machinehealthcheck-controller"), + }).SetupWithManager(ctx, env.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { panic(fmt.Sprintf("Failed to start MachineHealthCheckReconciler: %v", err)) } go func() { fmt.Println("Starting the test environment manager") - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() SetDefaultEventuallyPollingInterval(100 * time.Millisecond) SetDefaultEventuallyTimeout(timeout) @@ -114,7 +114,7 @@ func TestMain(m *testing.M) { code := m.Run() fmt.Println("Stopping the test environment") - if err := testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop the test environment: %v", err)) } diff --git a/controllers/suite_util_test.go b/controllers/suite_util_test.go index f8cdc2d06de5..62247f571be4 100644 --- a/controllers/suite_util_test.go +++ b/controllers/suite_util_test.go @@ -41,7 +41,7 @@ func intOrStrPtr(i int32) *intstr.IntOrString { func fakeBootstrapRefReady(ref corev1.ObjectReference, base map[string]interface{}, g *WithT) { bref := (&unstructured.Unstructured{Object: base}).DeepCopy() g.Eventually(func() error { - return testEnv.Get(ctx, client.ObjectKey{Name: ref.Name, Namespace: 
ref.Namespace}, bref) }).Should(Succeed()) bdataSecret := &corev1.Secret{ @@ -53,35 +53,35 @@ func fakeBootstrapRefReady(ref corev1.ObjectReference, base map[string]interface "value": "data", }, } - g.Expect(testEnv.Create(ctx, bdataSecret)).To(Succeed()) + g.Expect(env.Create(ctx, bdataSecret)).To(Succeed()) brefPatch := client.MergeFrom(bref.DeepCopy()) g.Expect(unstructured.SetNestedField(bref.Object, true, "status", "ready")).To(Succeed()) g.Expect(unstructured.SetNestedField(bref.Object, bdataSecret.Name, "status", "dataSecretName")).To(Succeed()) - g.Expect(testEnv.Status().Patch(ctx, bref, brefPatch)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, bref, brefPatch)).To(Succeed()) } func fakeInfrastructureRefReady(ref corev1.ObjectReference, base map[string]interface{}, g *WithT) string { iref := (&unstructured.Unstructured{Object: base}).DeepCopy() g.Eventually(func() error { - return testEnv.Get(ctx, client.ObjectKey{Name: ref.Name, Namespace: ref.Namespace}, iref) + return env.Get(ctx, client.ObjectKey{Name: ref.Name, Namespace: ref.Namespace}, iref) }).Should(Succeed()) irefPatch := client.MergeFrom(iref.DeepCopy()) providerID := fmt.Sprintf("test:////%v", uuid.NewUUID()) g.Expect(unstructured.SetNestedField(iref.Object, providerID, "spec", "providerID")).To(Succeed()) - g.Expect(testEnv.Patch(ctx, iref, irefPatch)).To(Succeed()) + g.Expect(env.Patch(ctx, iref, irefPatch)).To(Succeed()) irefPatch = client.MergeFrom(iref.DeepCopy()) g.Expect(unstructured.SetNestedField(iref.Object, true, "status", "ready")).To(Succeed()) - g.Expect(testEnv.Status().Patch(ctx, iref, irefPatch)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, iref, irefPatch)).To(Succeed()) return providerID } func fakeMachineNodeRef(m *clusterv1.Machine, pid string, g *WithT) { g.Eventually(func() error { key := client.ObjectKey{Name: m.Name, Namespace: m.Namespace} - return testEnv.Get(ctx, key, &clusterv1.Machine{}) + return env.Get(ctx, key, &clusterv1.Machine{}) }).Should(Succeed()) if m.Status.NodeRef != nil { @@ -97,22 +97,22 @@ func fakeMachineNodeRef(m *clusterv1.Machine, pid string, g *WithT) { ProviderID: pid, }, } - g.Expect(testEnv.Create(ctx, node)).To(Succeed()) + g.Expect(env.Create(ctx, node)).To(Succeed()) g.Eventually(func() error { key := client.ObjectKey{Name: node.Name, Namespace: node.Namespace} - return testEnv.Get(ctx, key, &corev1.Node{}) + return env.Get(ctx, key, &corev1.Node{}) }).Should(Succeed()) // Patch the node and make it look like ready. patchNode := client.MergeFrom(node.DeepCopy()) node.Status.Conditions = append(node.Status.Conditions, corev1.NodeCondition{Type: corev1.NodeReady, Status: corev1.ConditionTrue}) - g.Expect(testEnv.Status().Patch(ctx, node, patchNode)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, node, patchNode)).To(Succeed()) // Patch the Machine. 
patchMachine := client.MergeFrom(m.DeepCopy()) m.Spec.ProviderID = pointer.StringPtr(pid) - g.Expect(testEnv.Patch(ctx, m, patchMachine)).To(Succeed()) + g.Expect(env.Patch(ctx, m, patchMachine)).To(Succeed()) patchMachine = client.MergeFrom(m.DeepCopy()) m.Status.NodeRef = &corev1.ObjectReference{ @@ -120,5 +120,5 @@ func fakeMachineNodeRef(m *clusterv1.Machine, pid string, g *WithT) { Kind: node.Kind, Name: node.Name, } - g.Expect(testEnv.Status().Patch(ctx, m, patchMachine)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, m, patchMachine)).To(Succeed()) } diff --git a/controlplane/kubeadm/api/v1alpha3/webhook_suite_test.go b/controlplane/kubeadm/api/v1alpha3/suite_test.go similarity index 79% rename from controlplane/kubeadm/api/v1alpha3/webhook_suite_test.go rename to controlplane/kubeadm/api/v1alpha3/suite_test.go index e94f87abc796..2f899e906291 100644 --- a/controlplane/kubeadm/api/v1alpha3/webhook_suite_test.go +++ b/controlplane/kubeadm/api/v1alpha3/suite_test.go @@ -23,32 +23,32 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/cluster-api/test/helpers" + "sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" // +kubebuilder:scaffold:imports ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { // Bootstrapping test environment utilruntime.Must(AddToScheme(scheme.Scheme)) - testEnv = helpers.NewTestEnvironment() + env = envtest.New() go func() { - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() // Run tests code := m.Run() // Tearing down the test environment - if err := testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop the envtest: %v", err)) } diff --git a/controlplane/kubeadm/api/v1alpha3/webhook_test.go b/controlplane/kubeadm/api/v1alpha3/webhook_test.go index 07579b77d28b..42fe8de9c4c5 100644 --- a/controlplane/kubeadm/api/v1alpha3/webhook_test.go +++ b/controlplane/kubeadm/api/v1alpha3/webhook_test.go @@ -35,7 +35,7 @@ import ( func TestKubeadmControlPlaneConversion(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) + ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) g.Expect(err).ToNot(HaveOccurred()) infraMachineTemplateName := fmt.Sprintf("test-machinetemplate-%s", util.RandomString(5)) controlPlaneName := fmt.Sprintf("test-controlpane-%s", util.RandomString(5)) @@ -87,8 +87,8 @@ func TestKubeadmControlPlaneConversion(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, controlPlane)).To(Succeed()) + g.Expect(env.Create(ctx, controlPlane)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, controlPlane) } diff --git a/controlplane/kubeadm/controllers/controller_test.go b/controlplane/kubeadm/controllers/controller_test.go index 852fc6a4e0ec..194121eefab4 100644 --- a/controlplane/kubeadm/controllers/controller_test.go +++ b/controlplane/kubeadm/controllers/controller_test.go @@ -127,11 +127,11 @@ func TestClusterToKubeadmControlPlaneOtherControlPlane(t 
*testing.T) { func TestReconcileReturnErrorWhenOwnerClusterIsMissing(t *testing.T) { g := NewWithT(t) cluster, kcp, _ := createClusterWithControlPlane() - g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) - g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + g.Expect(env.Create(ctx, cluster)).To(Succeed()) + g.Expect(env.Create(ctx, kcp)).To(Succeed()) r := &KubeadmControlPlaneReconciler{ - Client: testEnv, + Client: env, recorder: record.NewFakeRecorder(32), } @@ -140,7 +140,7 @@ func TestReconcileReturnErrorWhenOwnerClusterIsMissing(t *testing.T) { g.Expect(result).To(Equal(ctrl.Result{})) // calling reconcile should return error - g.Expect(testEnv.Delete(ctx, cluster)).To(Succeed()) + g.Expect(env.Delete(ctx, cluster)).To(Succeed()) g.Eventually(func() error { _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) @@ -153,23 +153,23 @@ func TestReconcileUpdateObservedGeneration(t *testing.T) { g := NewWithT(t) r := &KubeadmControlPlaneReconciler{ - Client: testEnv, + Client: env, recorder: record.NewFakeRecorder(32), - managementCluster: &internal.Management{Client: testEnv.Client, Tracker: nil}, + managementCluster: &internal.Management{Client: env.Client, Tracker: nil}, } cluster, kcp, _ := createClusterWithControlPlane() - g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) - g.Expect(testEnv.Create(ctx, kcp)).To(Succeed()) + g.Expect(env.Create(ctx, cluster)).To(Succeed()) + g.Expect(env.Create(ctx, kcp)).To(Succeed()) // read kcp.Generation after create - errGettingObject := testEnv.Get(ctx, util.ObjectKey(kcp), kcp) + errGettingObject := env.Get(ctx, util.ObjectKey(kcp), kcp) g.Expect(errGettingObject).NotTo(HaveOccurred()) generation := kcp.Generation // Set cluster.status.InfrastructureReady so we actually enter in the reconcile loop patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"status\":{\"infrastructureReady\":%t}}", true))) - g.Expect(testEnv.Status().Patch(ctx, cluster, patch)).To(Succeed()) + g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed()) // call reconcile the first time, so we can check if observedGeneration is set when adding a finalizer result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) @@ -177,17 +177,17 @@ func TestReconcileUpdateObservedGeneration(t *testing.T) { g.Expect(result).To(Equal(ctrl.Result{})) g.Eventually(func() int64 { - errGettingObject = testEnv.Get(ctx, util.ObjectKey(kcp), kcp) + errGettingObject = env.Get(ctx, util.ObjectKey(kcp), kcp) g.Expect(errGettingObject).NotTo(HaveOccurred()) return kcp.Status.ObservedGeneration }, 10*time.Second).Should(Equal(generation)) // triggers a generation change by changing the spec kcp.Spec.Replicas = pointer.Int32Ptr(*kcp.Spec.Replicas + 2) - g.Expect(testEnv.Update(ctx, kcp)).To(Succeed()) + g.Expect(env.Update(ctx, kcp)).To(Succeed()) // read kcp.Generation after the update - errGettingObject = testEnv.Get(ctx, util.ObjectKey(kcp), kcp) + errGettingObject = env.Get(ctx, util.ObjectKey(kcp), kcp) g.Expect(errGettingObject).NotTo(HaveOccurred()) generation = kcp.Generation @@ -197,7 +197,7 @@ func TestReconcileUpdateObservedGeneration(t *testing.T) { _, _ = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) g.Eventually(func() int64 { - errGettingObject = testEnv.Get(ctx, util.ObjectKey(kcp), kcp) + errGettingObject = env.Get(ctx, util.ObjectKey(kcp), kcp) g.Expect(errGettingObject).NotTo(HaveOccurred()) return kcp.Status.ObservedGeneration }, 10*time.Second).Should(Equal(generation)) diff --git 
a/controlplane/kubeadm/controllers/remediation_test.go b/controlplane/kubeadm/controllers/remediation_test.go index e29ebadf5724..6f92006411d6 100644 --- a/controlplane/kubeadm/controllers/remediation_test.go +++ b/controlplane/kubeadm/controllers/remediation_test.go @@ -43,10 +43,10 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g := NewWithT(t) ctx := context.TODO() r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), } - ns, err := testEnv.CreateNamespace(ctx, "ns1") + ns, err := env.CreateNamespace(ctx, "ns1") g.Expect(err).ToNot(HaveOccurred()) t.Run("Remediation does not happen if there are no unhealthy machines", func(t *testing.T) { @@ -102,7 +102,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) assertMachineCondition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate if current replicas are less or equal then 1") - g.Expect(testEnv.Cleanup(ctx, m)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m)).To(Succeed()) }) t.Run("Remediation does not happen if number of machines lower than desired", func(t *testing.T) { g := NewWithT(t) @@ -123,7 +123,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for having at least 3 control plane machines before triggering remediation") - g.Expect(testEnv.Cleanup(ctx, m1, m2)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) t.Run("Remediation does not happen if there is a deleting machine", func(t *testing.T) { g := NewWithT(t) @@ -144,7 +144,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine deletion to complete before triggering remediation") - g.Expect(testEnv.Cleanup(ctx, m1, m2)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) t.Run("Remediation does not happen if there is at least one additional unhealthy etcd member on a 3 machine CP", func(t *testing.T) { g := NewWithT(t) @@ -162,7 +162,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -177,7 +177,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because this could result in etcd loosing quorum") - g.Expect(testEnv.Cleanup(ctx, m1, m2, m3)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2, m3)).To(Succeed()) }) t.Run("Remediation does not happen if there is at least two additional unhealthy etcd member on a 5 machine CP", func(t *testing.T) { g := NewWithT(t) @@ -197,7 +197,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { } r := 
&KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -212,13 +212,13 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because this could result in etcd loosing quorum") - g.Expect(testEnv.Cleanup(ctx, m1, m2, m3, m4, m5)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2, m3, m4, m5)).To(Succeed()) }) t.Run("Remediation deletes unhealthy machine - 2 CP (during 1 CP rolling upgrade)", func(t *testing.T) { g := NewWithT(t) m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed()) - patchHelper, err := patch.NewHelper(m1, testEnv.GetClient()) + patchHelper, err := patch.NewHelper(m1, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) m1.ObjectMeta.Finalizers = []string{"wait-before-delete"} g.Expect(patchHelper.Patch(ctx, m1)) @@ -235,7 +235,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -251,22 +251,22 @@ func TestReconcileUnhealthyMachines(t *testing.T) { assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - err = testEnv.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) + err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) g.Expect(m1.ObjectMeta.DeletionTimestamp.IsZero()).To(BeFalse()) - patchHelper, err = patch.NewHelper(m1, testEnv.GetClient()) + patchHelper, err = patch.NewHelper(m1, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) m1.ObjectMeta.Finalizers = nil g.Expect(patchHelper.Patch(ctx, m1)) - g.Expect(testEnv.Cleanup(ctx, m1, m2)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) t.Run("Remediation deletes unhealthy machine - 3 CP", func(t *testing.T) { g := NewWithT(t) m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed()) - patchHelper, err := patch.NewHelper(m1, testEnv.GetClient()) + patchHelper, err := patch.NewHelper(m1, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) m1.ObjectMeta.Finalizers = []string{"wait-before-delete"} g.Expect(patchHelper.Patch(ctx, m1)) @@ -284,7 +284,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -300,22 +300,22 @@ func TestReconcileUnhealthyMachines(t *testing.T) { assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - err = testEnv.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) + err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) g.Expect(m1.ObjectMeta.DeletionTimestamp.IsZero()).To(BeFalse()) - 
patchHelper, err = patch.NewHelper(m1, testEnv.GetClient()) + patchHelper, err = patch.NewHelper(m1, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) m1.ObjectMeta.Finalizers = nil g.Expect(patchHelper.Patch(ctx, m1)) - g.Expect(testEnv.Cleanup(ctx, m1, m2, m3)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2, m3)).To(Succeed()) }) t.Run("Remediation deletes unhealthy machine - 4 CP (during 3 CP rolling upgrade)", func(t *testing.T) { g := NewWithT(t) m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed()) - patchHelper, err := patch.NewHelper(m1, testEnv.GetClient()) + patchHelper, err := patch.NewHelper(m1, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) m1.ObjectMeta.Finalizers = []string{"wait-before-delete"} g.Expect(patchHelper.Patch(ctx, m1)) @@ -334,7 +334,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -350,26 +350,26 @@ func TestReconcileUnhealthyMachines(t *testing.T) { assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - err = testEnv.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) + err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) g.Expect(m1.ObjectMeta.DeletionTimestamp.IsZero()).To(BeFalse()) - patchHelper, err = patch.NewHelper(m1, testEnv.GetClient()) + patchHelper, err = patch.NewHelper(m1, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) m1.ObjectMeta.Finalizers = nil g.Expect(patchHelper.Patch(ctx, m1)) - g.Expect(testEnv.Cleanup(ctx, m1, m2, m3, m4)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2, m3, m4)).To(Succeed()) }) - g.Expect(testEnv.Cleanup(ctx, ns)).To(Succeed()) + g.Expect(env.Cleanup(ctx, ns)).To(Succeed()) } func TestCanSafelyRemoveEtcdMember(t *testing.T) { g := NewWithT(t) ctx := context.TODO() - ns, err := testEnv.CreateNamespace(ctx, "ns1") + ns, err := env.CreateNamespace(ctx, "ns1") g.Expect(err).ToNot(HaveOccurred()) t.Run("Can't safely remediate 1 machine CP", func(t *testing.T) { @@ -386,7 +386,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -399,7 +399,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { g.Expect(ret).To(BeFalse()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(testEnv.Cleanup(ctx, m1)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1)).To(Succeed()) }) t.Run("Can safely remediate 2 machine CP without additional etcd member failures", func(t *testing.T) { g := NewWithT(t) @@ -416,7 +416,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -429,7 +429,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { g.Expect(ret).To(BeTrue()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(testEnv.Cleanup(ctx, m1, m2)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) t.Run("Can safely remediate 2 machines CP when the 
etcd member being remediated is missing", func(t *testing.T) { g := NewWithT(t) @@ -453,7 +453,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -466,7 +466,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { g.Expect(ret).To(BeTrue()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(testEnv.Cleanup(ctx, m1, m2)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) t.Run("Can't safely remediate 2 machines CP with one additional etcd member failure", func(t *testing.T) { g := NewWithT(t) @@ -483,7 +483,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -496,7 +496,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { g.Expect(ret).To(BeFalse()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(testEnv.Cleanup(ctx, m1, m2)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) t.Run("Can safely remediate 3 machines CP without additional etcd member failures", func(t *testing.T) { g := NewWithT(t) @@ -514,7 +514,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -527,7 +527,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { g.Expect(ret).To(BeTrue()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(testEnv.Cleanup(ctx, m1, m2, m3)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2, m3)).To(Succeed()) }) t.Run("Can safely remediate 3 machines CP when the etcd member being remediated is missing", func(t *testing.T) { g := NewWithT(t) @@ -552,7 +552,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -565,7 +565,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { g.Expect(ret).To(BeTrue()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(testEnv.Cleanup(ctx, m1, m2, m3)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2, m3)).To(Succeed()) }) t.Run("Can't safely remediate 3 machines CP with one additional etcd member failure", func(t *testing.T) { g := NewWithT(t) @@ -583,7 +583,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -596,7 +596,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { g.Expect(ret).To(BeFalse()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(testEnv.Cleanup(ctx, m1, m2, m3)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2, m3)).To(Succeed()) }) t.Run("Can safely remediate 5 machines CP less than 2 additional etcd member failures", func(t *testing.T) { g := NewWithT(t) @@ -616,7 +616,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), 
managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -629,7 +629,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { g.Expect(ret).To(BeTrue()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(testEnv.Cleanup(ctx, m1, m2, m3, m4, m5)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2, m3, m4, m5)).To(Succeed()) }) t.Run("Can't safely remediate 5 machines CP with 2 additional etcd member failures", func(t *testing.T) { g := NewWithT(t) @@ -649,7 +649,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -662,7 +662,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { g.Expect(ret).To(BeFalse()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(testEnv.Cleanup(ctx, m1, m2, m3, m4, m5)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2, m3, m4, m5)).To(Succeed()) }) t.Run("Can safely remediate 7 machines CP with less than 3 additional etcd member failures", func(t *testing.T) { g := NewWithT(t) @@ -684,7 +684,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -697,7 +697,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { g.Expect(ret).To(BeTrue()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(testEnv.Cleanup(ctx, m1, m2, m3, m4, m5, m6, m7)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2, m3, m4, m5, m6, m7)).To(Succeed()) }) t.Run("Can't safely remediate 7 machines CP with 3 additional etcd member failures", func(t *testing.T) { g := NewWithT(t) @@ -719,7 +719,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { } r := &KubeadmControlPlaneReconciler{ - Client: testEnv.GetClient(), + Client: env.GetClient(), recorder: record.NewFakeRecorder(32), managementCluster: &fakeManagementCluster{ Workload: fakeWorkloadCluster{ @@ -732,9 +732,9 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { g.Expect(ret).To(BeFalse()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(testEnv.Cleanup(ctx, m1, m2, m3, m4, m5, m6, m7)).To(Succeed()) + g.Expect(env.Cleanup(ctx, m1, m2, m3, m4, m5, m6, m7)).To(Succeed()) }) - g.Expect(testEnv.Cleanup(ctx, ns)).To(Succeed()) + g.Expect(env.Cleanup(ctx, ns)).To(Succeed()) } func nodes(machines collections.Machines) []string { @@ -790,9 +790,9 @@ func createMachine(ctx context.Context, g *WithT, namespace, name string, option }, }, } - g.Expect(testEnv.Create(ctx, m)).To(Succeed()) + g.Expect(env.Create(ctx, m)).To(Succeed()) - patchHelper, err := patch.NewHelper(m, testEnv.GetClient()) + patchHelper, err := patch.NewHelper(m, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) for _, opt := range append(options, withNodeRef(fmt.Sprintf("node-%s", m.Name))) { @@ -827,7 +827,7 @@ func getDeletingMachine(namespace, name string, options ...machineOption) *clust func assertMachineCondition(ctx context.Context, g *WithT, m *clusterv1.Machine, t clusterv1.ConditionType, status corev1.ConditionStatus, reason string, severity clusterv1.ConditionSeverity, message string) { g.Eventually(func() error { - if err := testEnv.Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Name}, m); err != nil { + if err := env.Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Name}, m); err != nil { return err } c := 
conditions.Get(m, t) diff --git a/controlplane/kubeadm/controllers/suite_test.go b/controlplane/kubeadm/controllers/suite_test.go index 7ceb60d2c10a..f8097672cb3f 100644 --- a/controlplane/kubeadm/controllers/suite_test.go +++ b/controlplane/kubeadm/controllers/suite_test.go @@ -21,31 +21,31 @@ import ( "os" "testing" - "sigs.k8s.io/cluster-api/test/helpers" + "sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" // +kubebuilder:scaffold:imports ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { // Bootstrapping test environment - testEnv = helpers.NewTestEnvironment() + env = envtest.New() go func() { - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() // Run tests code := m.Run() // Tearing down the test environment - if err := testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop the envtest: %v", err)) } diff --git a/controlplane/kubeadm/internal/cluster_test.go b/controlplane/kubeadm/internal/cluster_test.go index e1679aa7e7d5..71168b196305 100644 --- a/controlplane/kubeadm/internal/cluster_test.go +++ b/controlplane/kubeadm/internal/cluster_test.go @@ -79,10 +79,10 @@ func TestGetMachinesForCluster(t *testing.T) { func TestGetWorkloadCluster(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, "workload-cluster2") + ns, err := env.CreateNamespace(ctx, "workload-cluster2") g.Expect(err).ToNot(HaveOccurred()) defer func() { - g.Expect(testEnv.Cleanup(ctx, ns)).To(Succeed()) + g.Expect(env.Cleanup(ctx, ns)).To(Succeed()) }() // Create an etcd secret with valid certs @@ -108,7 +108,7 @@ func TestGetWorkloadCluster(t *testing.T) { badCrtEtcdSecret.Data[secret.TLSCrtDataName] = []byte("bad cert") tracker, err := remote.NewClusterCacheTracker( log.Log, - testEnv.Manager, + env.Manager, ) g.Expect(err).ToNot(HaveOccurred()) @@ -116,7 +116,7 @@ func TestGetWorkloadCluster(t *testing.T) { // Store the envtest config as the contents of the kubeconfig secret. // This way we are using the envtest environment as both the // management and the workload cluster. 
- testEnvKubeconfig := kubeconfig.FromEnvTestConfig(testEnv.GetConfig(), &clusterv1.Cluster{ + testEnvKubeconfig := kubeconfig.FromEnvTestConfig(env.GetConfig(), &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: ns.Name, @@ -185,14 +185,14 @@ func TestGetWorkloadCluster(t *testing.T) { g := NewWithT(t) for _, o := range tt.objs { - g.Expect(testEnv.Client.Create(ctx, o)).To(Succeed()) + g.Expect(env.Client.Create(ctx, o)).To(Succeed()) defer func(do client.Object) { - g.Expect(testEnv.Cleanup(ctx, do)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do)).To(Succeed()) }(o) } m := Management{ - Client: testEnv, + Client: env, Tracker: tracker, } diff --git a/controlplane/kubeadm/internal/suite_test.go b/controlplane/kubeadm/internal/suite_test.go index 95596fa0d098..db8e87583587 100644 --- a/controlplane/kubeadm/internal/suite_test.go +++ b/controlplane/kubeadm/internal/suite_test.go @@ -21,28 +21,28 @@ import ( "os" "testing" - "sigs.k8s.io/cluster-api/test/helpers" + "sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { - testEnv = helpers.NewTestEnvironment() + env = envtest.New() go func() { - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() code := m.Run() - if err := testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop envtest: %v", err)) } diff --git a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go index 9008e53367c6..c05d9a44ace8 100644 --- a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go @@ -396,16 +396,16 @@ kind: ClusterConfiguration for _, o := range tt.objs { // NB. deep copy test object so changes applied during a test does not affect other tests. o := o.DeepCopyObject().(client.Object) - g.Expect(testEnv.CreateAndWait(ctx, o)).To(Succeed()) + g.Expect(env.CreateAndWait(ctx, o)).To(Succeed()) } // Register cleanup function t.Cleanup(func() { - _ = testEnv.CleanupAndWait(ctx, tt.objs...) + _ = env.CleanupAndWait(ctx, tt.objs...) 
}) w := &Workload{ - Client: testEnv.GetClient(), + Client: env.GetClient(), CoreDNSMigrator: tt.migrator, } err := w.UpdateCoreDNS(ctx, tt.kcp, semver.MustParse("1.19.1")) @@ -420,13 +420,13 @@ kind: ClusterConfiguration if tt.expectUpdates { // assert kubeadmConfigMap var expectedKubeadmConfigMap corev1.ConfigMap - g.Expect(testEnv.Get(ctx, client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &expectedKubeadmConfigMap)).To(Succeed()) + g.Expect(env.Get(ctx, client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &expectedKubeadmConfigMap)).To(Succeed()) g.Expect(expectedKubeadmConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring(tt.kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag))) g.Expect(expectedKubeadmConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring(tt.kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageRepository))) // assert CoreDNS corefile var expectedConfigMap corev1.ConfigMap - g.Expect(testEnv.Get(ctx, client.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed()) + g.Expect(env.Get(ctx, client.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed()) g.Expect(expectedConfigMap.Data).To(HaveLen(2)) g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile", "updated-core-file")) g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile-backup", expectedCorefile)) @@ -434,7 +434,7 @@ kind: ClusterConfiguration // assert CoreDNS deployment var actualDeployment appsv1.Deployment g.Eventually(func() string { - g.Expect(testEnv.Get(ctx, client.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &actualDeployment)).To(Succeed()) + g.Expect(env.Get(ctx, client.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &actualDeployment)).To(Succeed()) return actualDeployment.Spec.Template.Spec.Containers[0].Image }, "5s").Should(Equal(tt.expectImage)) } diff --git a/controlplane/kubeadm/internal/workload_cluster_etcd_test.go b/controlplane/kubeadm/internal/workload_cluster_etcd_test.go index 918e018715fc..1b980e36b629 100644 --- a/controlplane/kubeadm/internal/workload_cluster_etcd_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_etcd_test.go @@ -571,14 +571,14 @@ func TestReconcileEtcdMembers(t *testing.T) { g := NewWithT(t) for _, o := range tt.objs { - g.Expect(testEnv.CreateAndWait(ctx, o)).To(Succeed()) + g.Expect(env.CreateAndWait(ctx, o)).To(Succeed()) defer func(do client.Object) { - g.Expect(testEnv.CleanupAndWait(ctx, do)).To(Succeed()) + g.Expect(env.CleanupAndWait(ctx, do)).To(Succeed()) }(o) } w := &Workload{ - Client: testEnv.Client, + Client: env.Client, etcdClientGenerator: tt.etcdClientGenerator, } ctx := context.TODO() @@ -590,7 +590,7 @@ func TestReconcileEtcdMembers(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) if tt.assert != nil { - tt.assert(g, testEnv.Client) + tt.assert(g, env.Client) } }) } diff --git a/exp/addons/api/v1alpha3/webhook_suite_test.go b/exp/addons/api/v1alpha3/suite_test.go similarity index 79% rename from exp/addons/api/v1alpha3/webhook_suite_test.go rename to exp/addons/api/v1alpha3/suite_test.go index e94f87abc796..2f899e906291 100644 --- a/exp/addons/api/v1alpha3/webhook_suite_test.go +++ b/exp/addons/api/v1alpha3/suite_test.go @@ -23,32 +23,32 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/cluster-api/test/helpers" + 
"sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" // +kubebuilder:scaffold:imports ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { // Bootstrapping test environment utilruntime.Must(AddToScheme(scheme.Scheme)) - testEnv = helpers.NewTestEnvironment() + env = envtest.New() go func() { - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() // Run tests code := m.Run() // Tearing down the test environment - if err := testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop the envtest: %v", err)) } diff --git a/exp/addons/api/v1alpha3/webhook_test.go b/exp/addons/api/v1alpha3/webhook_test.go index 702af1e73a43..72c344124c12 100644 --- a/exp/addons/api/v1alpha3/webhook_test.go +++ b/exp/addons/api/v1alpha3/webhook_test.go @@ -30,7 +30,7 @@ import ( func TestClusterResourceSetConversion(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) + ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) g.Expect(err).ToNot(HaveOccurred()) clusterName := fmt.Sprintf("test-cluster-%s", util.RandomString(5)) crsName := fmt.Sprintf("test-clusterresourceset-%s", util.RandomString(5)) @@ -55,15 +55,15 @@ func TestClusterResourceSetConversion(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, crs)).To(Succeed()) + g.Expect(env.Create(ctx, crs)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, crs) } func TestClusterResourceSetBindingConversion(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) + ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) g.Expect(err).ToNot(HaveOccurred()) crsbindingName := fmt.Sprintf("test-clusterresourcesetbinding-%s", util.RandomString(5)) crsbinding := &ClusterResourceSetBinding{ @@ -100,8 +100,8 @@ func TestClusterResourceSetBindingConversion(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, crsbinding)).To(Succeed()) + g.Expect(env.Create(ctx, crsbinding)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, crsbinding) } diff --git a/exp/addons/controllers/clusterresourceset_controller_test.go b/exp/addons/controllers/clusterresourceset_controller_test.go index b75753eed950..05d02622d081 100644 --- a/exp/addons/controllers/clusterresourceset_controller_test.go +++ b/exp/addons/controllers/clusterresourceset_controller_test.go @@ -54,9 +54,9 @@ func TestClusterResourceSetReconciler(t *testing.T) { testCluster = &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName, Namespace: defaultNamespaceName}} t.Log("Creating the Cluster") - g.Expect(testEnv.Create(ctx, testCluster)).To(Succeed()) + g.Expect(env.Create(ctx, testCluster)).To(Succeed()) t.Log("Creating the remote Cluster kubeconfig") - g.Expect(testEnv.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) + 
g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) testConfigmap := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: configmapName, @@ -86,8 +86,8 @@ metadata: }, } t.Log("Creating a Secret and a ConfigMap with ConfigMap in their data field") - g.Expect(testEnv.Create(ctx, testConfigmap)).To(Succeed()) - g.Expect(testEnv.Create(ctx, testSecret)).To(Succeed()) + g.Expect(env.Create(ctx, testConfigmap)).To(Succeed()) + g.Expect(env.Create(ctx, testSecret)).To(Succeed()) } teardown := func(t *testing.T, g *WithT) { @@ -98,7 +98,7 @@ metadata: Namespace: defaultNamespaceName, }, } - g.Expect(testEnv.Delete(ctx, secret)).To(Succeed()) + g.Expect(env.Delete(ctx, secret)).To(Succeed()) clusterResourceSetInstance := &addonsv1.ClusterResourceSet{ ObjectMeta: metav1.ObjectMeta{ @@ -107,9 +107,9 @@ metadata: }, } - err := testEnv.Get(ctx, client.ObjectKey{Namespace: clusterResourceSetInstance.Namespace, Name: clusterResourceSetInstance.Name}, clusterResourceSetInstance) + err := env.Get(ctx, client.ObjectKey{Namespace: clusterResourceSetInstance.Namespace, Name: clusterResourceSetInstance.Name}, clusterResourceSetInstance) if err == nil { - g.Expect(testEnv.Delete(ctx, clusterResourceSetInstance)).To(Succeed()) + g.Expect(env.Delete(ctx, clusterResourceSetInstance)).To(Succeed()) } g.Eventually(func() bool { @@ -118,15 +118,15 @@ metadata: Name: clusterResourceSetInstance.Name, } crs := &addonsv1.ClusterResourceSet{} - err := testEnv.Get(ctx, crsKey, crs) + err := env.Get(ctx, crsKey, crs) return err != nil }, timeout).Should(BeTrue()) - g.Expect(testEnv.Delete(ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ + g.Expect(env.Delete(ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ Name: configmapName, Namespace: defaultNamespaceName, }})).To(Succeed()) - g.Expect(testEnv.Delete(ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + g.Expect(env.Delete(ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ Name: secretName, Namespace: defaultNamespaceName, }})).To(Succeed()) @@ -139,7 +139,7 @@ metadata: t.Log("Updating the cluster with labels") testCluster.SetLabels(labels) - g.Expect(testEnv.Update(ctx, testCluster)).To(Succeed()) + g.Expect(env.Update(ctx, testCluster)).To(Succeed()) t.Log("Creating a ClusterResourceSet instance that has same labels as selector") clusterResourceSetInstance := &addonsv1.ClusterResourceSet{ @@ -155,7 +155,7 @@ metadata: }, } // Create the ClusterResourceSet. - g.Expect(testEnv.Create(ctx, clusterResourceSetInstance)).To(Succeed()) + g.Expect(env.Create(ctx, clusterResourceSetInstance)).To(Succeed()) t.Log("Verifying ClusterResourceSetBinding is created with cluster owner reference") g.Eventually(func() bool { @@ -164,7 +164,7 @@ metadata: Namespace: testCluster.Namespace, Name: testCluster.Name, } - err := testEnv.Get(ctx, clusterResourceSetBindingKey, binding) + err := env.Get(ctx, clusterResourceSetBindingKey, binding) if err != nil { return false } @@ -188,7 +188,7 @@ metadata: }) }, timeout).Should(BeTrue()) t.Log("Deleting the Cluster") - g.Expect(testEnv.Delete(ctx, testCluster)).To(Succeed()) + g.Expect(env.Delete(ctx, testCluster)).To(Succeed()) }) t.Run("Should reconcile a cluster when its labels are changed to match a ClusterResourceSet's selector", func(t *testing.T) { @@ -208,10 +208,10 @@ metadata: }, } // Create the ClusterResourceSet. 
- g.Expect(testEnv.Create(ctx, clusterResourceSetInstance)).To(Succeed()) + g.Expect(env.Create(ctx, clusterResourceSetInstance)).To(Succeed()) testCluster.SetLabels(labels) - g.Expect(testEnv.Update(ctx, testCluster)).To(Succeed()) + g.Expect(env.Update(ctx, testCluster)).To(Succeed()) t.Log("Verifying ClusterResourceSetBinding is created with cluster owner reference") g.Eventually(func() bool { @@ -220,7 +220,7 @@ metadata: Namespace: testCluster.Namespace, Name: testCluster.Name, } - err := testEnv.Get(ctx, clusterResourceSetBindingKey, binding) + err := env.Get(ctx, clusterResourceSetBindingKey, binding) if err != nil { return false } @@ -241,16 +241,16 @@ metadata: g.Eventually(func() bool { binding := &addonsv1.ClusterResourceSetBinding{} - err := testEnv.Get(ctx, clusterResourceSetBindingKey, binding) + err := env.Get(ctx, clusterResourceSetBindingKey, binding) return err == nil }, timeout).Should(BeTrue()) t.Log("Verifying ClusterResourceSetBinding is deleted when its cluster owner reference is deleted") - g.Expect(testEnv.Delete(ctx, testCluster)).To(Succeed()) + g.Expect(env.Delete(ctx, testCluster)).To(Succeed()) g.Eventually(func() bool { binding := &addonsv1.ClusterResourceSetBinding{} - err := testEnv.Get(ctx, clusterResourceSetBindingKey, binding) + err := env.Get(ctx, clusterResourceSetBindingKey, binding) return apierrors.IsNotFound(err) }, timeout).Should(BeTrue()) }) @@ -275,10 +275,10 @@ metadata: }, } // Create the ClusterResourceSet. - g.Expect(testEnv.Create(ctx, crsInstance)).To(Succeed()) + g.Expect(env.Create(ctx, crsInstance)).To(Succeed()) testCluster.SetLabels(labels) - g.Expect(testEnv.Update(ctx, testCluster)).To(Succeed()) + g.Expect(env.Update(ctx, testCluster)).To(Succeed()) t.Log("Verifying ClusterResourceSetBinding is created with cluster owner reference") // Wait until ClusterResourceSetBinding is created for the Cluster @@ -289,7 +289,7 @@ metadata: g.Eventually(func() bool { binding := &addonsv1.ClusterResourceSetBinding{} - err := testEnv.Get(ctx, clusterResourceSetBindingKey, binding) + err := env.Get(ctx, clusterResourceSetBindingKey, binding) return err == nil }, timeout).Should(BeTrue()) @@ -297,7 +297,7 @@ metadata: g.Eventually(func() bool { binding := &addonsv1.ClusterResourceSetBinding{} - err := testEnv.Get(ctx, clusterResourceSetBindingKey, binding) + err := env.Get(ctx, clusterResourceSetBindingKey, binding) if err == nil { if len(binding.Spec.Bindings) > 0 && len(binding.Spec.Bindings[0].Resources) == 0 { return true @@ -313,9 +313,9 @@ metadata: }, Data: map[string]string{}, } - g.Expect(testEnv.Create(ctx, newConfigmap)).To(Succeed()) + g.Expect(env.Create(ctx, newConfigmap)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, newConfigmap)).To(Succeed()) + g.Expect(env.Delete(ctx, newConfigmap)).To(Succeed()) }() cmKey := client.ObjectKey{ @@ -324,14 +324,14 @@ metadata: } g.Eventually(func() bool { m := &corev1.ConfigMap{} - err := testEnv.Get(ctx, cmKey, m) + err := env.Get(ctx, cmKey, m) return err == nil }, timeout).Should(BeTrue()) // When the ConfigMap resource is created, CRS should get reconciled immediately. 
g.Eventually(func() error { binding := &addonsv1.ClusterResourceSetBinding{} - if err := testEnv.Get(ctx, clusterResourceSetBindingKey, binding); err != nil { + if err := env.Get(ctx, clusterResourceSetBindingKey, binding); err != nil { return err } if len(binding.Spec.Bindings[0].Resources) > 0 && binding.Spec.Bindings[0].Resources[0].Name == newCMName { @@ -341,11 +341,11 @@ metadata: }, timeout).Should(Succeed()) t.Log("Verifying ClusterResourceSetBinding is deleted when its cluster owner reference is deleted") - g.Expect(testEnv.Delete(ctx, testCluster)).To(Succeed()) + g.Expect(env.Delete(ctx, testCluster)).To(Succeed()) g.Eventually(func() bool { binding := &addonsv1.ClusterResourceSetBinding{} - err := testEnv.Get(ctx, clusterResourceSetBindingKey, binding) + err := env.Get(ctx, clusterResourceSetBindingKey, binding) return apierrors.IsNotFound(err) }, timeout).Should(BeTrue()) }) @@ -357,7 +357,7 @@ metadata: t.Log("Updating the cluster with labels") testCluster.SetLabels(labels) - g.Expect(testEnv.Update(ctx, testCluster)).To(Succeed()) + g.Expect(env.Update(ctx, testCluster)).To(Succeed()) t.Log("Creating a ClusterResourceSet instance that has same labels as selector") clusterResourceSetInstance2 := &addonsv1.ClusterResourceSet{ @@ -373,7 +373,7 @@ metadata: }, } // Create the ClusterResourceSet. - g.Expect(testEnv.Create(ctx, clusterResourceSetInstance2)).To(Succeed()) + g.Expect(env.Create(ctx, clusterResourceSetInstance2)).To(Succeed()) t.Log("Creating a second ClusterResourceSet instance that has same labels as selector") clusterResourceSetInstance3 := &addonsv1.ClusterResourceSet{ @@ -389,7 +389,7 @@ metadata: }, } // Create the ClusterResourceSet. - g.Expect(testEnv.Create(ctx, clusterResourceSetInstance3)).To(Succeed()) + g.Expect(env.Create(ctx, clusterResourceSetInstance3)).To(Succeed()) t.Log("Verifying ClusterResourceSetBinding is created with 2 ClusterResourceSets") g.Eventually(func() bool { @@ -398,7 +398,7 @@ metadata: Namespace: testCluster.Namespace, Name: testCluster.Name, } - err := testEnv.Get(ctx, clusterResourceSetBindingKey, binding) + err := env.Get(ctx, clusterResourceSetBindingKey, binding) if err != nil { return false } @@ -407,14 +407,14 @@ metadata: t.Log("Verifying deleted CRS is deleted from ClusterResourceSetBinding") // Delete one of the CRS instances and wait until it is removed from the binding list. - g.Expect(testEnv.Delete(ctx, clusterResourceSetInstance2)).To(Succeed()) + g.Expect(env.Delete(ctx, clusterResourceSetInstance2)).To(Succeed()) g.Eventually(func() bool { binding := &addonsv1.ClusterResourceSetBinding{} clusterResourceSetBindingKey := client.ObjectKey{ Namespace: testCluster.Namespace, Name: testCluster.Name, } - err := testEnv.Get(ctx, clusterResourceSetBindingKey, binding) + err := env.Get(ctx, clusterResourceSetBindingKey, binding) if err != nil { return false } @@ -423,18 +423,18 @@ metadata: t.Log("Verifying ClusterResourceSetBinding is deleted after deleting all matching CRS objects") // Delete one of the CRS instances and wait until it is removed from the binding list. 
- g.Expect(testEnv.Delete(ctx, clusterResourceSetInstance3)).To(Succeed()) + g.Expect(env.Delete(ctx, clusterResourceSetInstance3)).To(Succeed()) g.Eventually(func() bool { binding := &addonsv1.ClusterResourceSetBinding{} clusterResourceSetBindingKey := client.ObjectKey{ Namespace: testCluster.Namespace, Name: testCluster.Name, } - return testEnv.Get(ctx, clusterResourceSetBindingKey, binding) != nil + return env.Get(ctx, clusterResourceSetBindingKey, binding) != nil }, timeout).Should(BeTrue()) t.Log("Deleting the Cluster") - g.Expect(testEnv.Delete(ctx, testCluster)).To(Succeed()) + g.Expect(env.Delete(ctx, testCluster)).To(Succeed()) }) t.Run("Should add finalizer after reconcile", func(t *testing.T) { @@ -457,7 +457,7 @@ metadata: }, } // Create the ClusterResourceSet. - g.Expect(testEnv.Create(ctx, clusterResourceSetInstance)).To(Succeed()) + g.Expect(env.Create(ctx, clusterResourceSetInstance)).To(Succeed()) g.Eventually(func() bool { crsKey := client.ObjectKey{ Namespace: clusterResourceSetInstance.Namespace, @@ -465,7 +465,7 @@ metadata: } crs := &addonsv1.ClusterResourceSet{} - err := testEnv.Get(ctx, crsKey, crs) + err := env.Get(ctx, crsKey, crs) if err == nil { return len(crs.Finalizers) > 0 } diff --git a/exp/addons/controllers/suite_test.go b/exp/addons/controllers/suite_test.go index 8cbc11ed02e1..5cddf4902693 100644 --- a/exp/addons/controllers/suite_test.go +++ b/exp/addons/controllers/suite_test.go @@ -22,7 +22,7 @@ import ( "testing" "sigs.k8s.io/cluster-api/controllers/remote" - "sigs.k8s.io/cluster-api/test/helpers" + "sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/log" @@ -30,45 +30,45 @@ import ( ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { fmt.Println("Creating new test environment") - testEnv = helpers.NewTestEnvironment() + env = envtest.New() - trckr, err := remote.NewClusterCacheTracker(log.NullLogger{}, testEnv.Manager) + trckr, err := remote.NewClusterCacheTracker(log.NullLogger{}, env.Manager) if err != nil { panic(fmt.Sprintf("Failed to create new cluster cache tracker: %v", err)) } reconciler := ClusterResourceSetReconciler{ - Client: testEnv, + Client: env, Tracker: trckr, } - if err = reconciler.SetupWithManager(ctx, testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { + if err = reconciler.SetupWithManager(ctx, env.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { panic(fmt.Sprintf("Failed to set up cluster resource set reconciler: %v", err)) } bindingReconciler := ClusterResourceSetBindingReconciler{ - Client: testEnv, + Client: env, } - if err = bindingReconciler.SetupWithManager(ctx, testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { + if err = bindingReconciler.SetupWithManager(ctx, env.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil { panic(fmt.Sprintf("Failed to set up cluster resource set binding reconciler: %v", err)) } go func() { fmt.Println("Starting the manager") - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() code := m.Run() fmt.Println("Tearing down test suite") - if err := 
testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop envtest: %v", err)) } diff --git a/exp/api/v1alpha3/suite_test.go b/exp/api/v1alpha3/suite_test.go new file mode 100644 index 000000000000..2f899e906291 --- /dev/null +++ b/exp/api/v1alpha3/suite_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + "fmt" + "os" + "testing" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/cluster-api/internal/envtest" + ctrl "sigs.k8s.io/controller-runtime" + // +kubebuilder:scaffold:imports +) + +var ( + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() +) + +func TestMain(m *testing.M) { + // Bootstrapping test environment + utilruntime.Must(AddToScheme(scheme.Scheme)) + env = envtest.New() + go func() { + if err := env.StartManager(ctx); err != nil { + panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) + } + }() + <-env.Manager.Elected() + env.WaitForWebhooks() + + // Run tests + code := m.Run() + // Tearing down the test environment + if err := env.Stop(); err != nil { + panic(fmt.Sprintf("Failed to stop the envtest: %v", err)) + } + + // Report exit code + os.Exit(code) +} diff --git a/exp/api/v1alpha3/webhook_suite_test.go b/exp/api/v1alpha3/webhook_suite_test.go deleted file mode 100644 index e94f87abc796..000000000000 --- a/exp/api/v1alpha3/webhook_suite_test.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha3 - -import ( - "fmt" - "os" - "testing" - - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/cluster-api/test/helpers" - ctrl "sigs.k8s.io/controller-runtime" - // +kubebuilder:scaffold:imports -) - -var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() -) - -func TestMain(m *testing.M) { - // Bootstrapping test environment - utilruntime.Must(AddToScheme(scheme.Scheme)) - testEnv = helpers.NewTestEnvironment() - go func() { - if err := testEnv.StartManager(ctx); err != nil { - panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) - } - }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() - - // Run tests - code := m.Run() - // Tearing down the test environment - if err := testEnv.Stop(); err != nil { - panic(fmt.Sprintf("Failed to stop the envtest: %v", err)) - } - - // Report exit code - os.Exit(code) -} diff --git a/exp/api/v1alpha3/webhook_test.go b/exp/api/v1alpha3/webhook_test.go index 47d20a839990..02a2b1e18a98 100644 --- a/exp/api/v1alpha3/webhook_test.go +++ b/exp/api/v1alpha3/webhook_test.go @@ -32,7 +32,7 @@ import ( func TestMachinePoolConversion(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) + ns, err := env.CreateNamespace(ctx, fmt.Sprintf("conversion-webhook-%s", util.RandomString(5))) g.Expect(err).ToNot(HaveOccurred()) clusterName := fmt.Sprintf("test-cluster-%s", util.RandomString(5)) machinePoolName := fmt.Sprintf("test-machinepool-%s", util.RandomString(5)) @@ -54,9 +54,9 @@ func TestMachinePoolConversion(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, machinePool)).To(Succeed()) + g.Expect(env.Create(ctx, machinePool)).To(Succeed()) defer func(do ...client.Object) { - g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) }(ns, machinePool) } diff --git a/exp/controllers/machinepool_controller_phases_test.go b/exp/controllers/machinepool_controller_phases_test.go index f5bf4068a6a7..db79be94d8a1 100644 --- a/exp/controllers/machinepool_controller_phases_test.go +++ b/exp/controllers/machinepool_controller_phases_test.go @@ -109,7 +109,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { t.Run("Should set OwnerReference and cluster name label on external objects", func(t *testing.T) { g := NewWithT(t) - defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(testEnv.Config, defaultCluster)) + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(env.Config, defaultCluster)) machinepool := defaultMachinePool.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() @@ -138,7 +138,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { t.Run("Should set `Pending` with a new MachinePool", func(t *testing.T) { g := NewWithT(t) - defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(testEnv.Config, defaultCluster)) + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(env.Config, defaultCluster)) machinepool := defaultMachinePool.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() @@ -158,7 +158,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { t.Run("Should set `Provisioning` when bootstrap is ready", func(t *testing.T) { g := NewWithT(t) - 
defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(testEnv.Config, defaultCluster)) + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(env.Config, defaultCluster)) machinepool := defaultMachinePool.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() @@ -185,7 +185,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { t.Run("Should set `Running` when bootstrap and infra is ready", func(t *testing.T) { g := NewWithT(t) - defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(testEnv.Config, defaultCluster)) + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(env.Config, defaultCluster)) machinepool := defaultMachinePool.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() @@ -231,7 +231,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { t.Run("Should set `Running` when bootstrap, infra, and ready replicas equals spec replicas", func(t *testing.T) { g := NewWithT(t) - defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(testEnv.Config, defaultCluster)) + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(env.Config, defaultCluster)) machinepool := defaultMachinePool.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() @@ -286,7 +286,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { t.Run("Should set `Provisioned` when there is a NodeRef but infra is not ready ", func(t *testing.T) { g := NewWithT(t) - defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(testEnv.Config, defaultCluster)) + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(env.Config, defaultCluster)) machinepool := defaultMachinePool.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() @@ -316,7 +316,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { t.Run("Should set `ScalingUp` when infra is scaling up", func(t *testing.T) { g := NewWithT(t) - defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(testEnv.Config, defaultCluster)) + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(env.Config, defaultCluster)) machinepool := defaultMachinePool.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() @@ -362,7 +362,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { t.Run("Should set `ScalingDown` when infra is scaling down", func(t *testing.T) { g := NewWithT(t) - defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(testEnv.Config, defaultCluster)) + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(env.Config, defaultCluster)) machinepool := defaultMachinePool.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() @@ -415,7 +415,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { t.Run("Should set `Deleting` when MachinePool is being deleted", func(t *testing.T) { g := NewWithT(t) - defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(testEnv.Config, 
defaultCluster)) + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(env.Config, defaultCluster)) machinepool := defaultMachinePool.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go index 177cafe76d38..97fbdf226579 100644 --- a/exp/controllers/suite_test.go +++ b/exp/controllers/suite_test.go @@ -21,43 +21,43 @@ import ( "os" "testing" - "sigs.k8s.io/cluster-api/test/helpers" + "sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" // +kubebuilder:scaffold:imports ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { fmt.Println("Creating new test environment") - testEnv = helpers.NewTestEnvironment() + env = envtest.New() machinePoolReconciler := MachinePoolReconciler{ - Client: testEnv, - recorder: testEnv.GetEventRecorderFor("machinepool-controller"), + Client: env, + recorder: env.GetEventRecorderFor("machinepool-controller"), } - err := machinePoolReconciler.SetupWithManager(ctx, testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}) + err := machinePoolReconciler.SetupWithManager(ctx, env.Manager, controller.Options{MaxConcurrentReconciles: 1}) if err != nil { panic(fmt.Sprintf("Failed to set up machine pool reconciler: %v", err)) } go func() { fmt.Println("Starting the manager") - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() code := m.Run() fmt.Println("Tearing down test suite") - if err := testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop envtest: %v", err)) } diff --git a/internal/envtest/doc.go b/internal/envtest/doc.go new file mode 100644 index 000000000000..5f7af1e98ca9 --- /dev/null +++ b/internal/envtest/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package envtest contains the test environment to run integration tests. +package envtest diff --git a/test/helpers/envtest.go b/internal/envtest/environment.go similarity index 93% rename from test/helpers/envtest.go rename to internal/envtest/environment.go index 1296c2c5716a..ce6e7579e79a 100644 --- a/test/helpers/envtest.go +++ b/internal/envtest/environment.go @@ -14,8 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package helpers contains the test environment to run integration tests. -package helpers +package envtest import ( "context" @@ -113,8 +112,8 @@ func init() { } } -// TestEnvironment encapsulates a Kubernetes local test environment. 
-type TestEnvironment struct { +// Environment encapsulates a Kubernetes local test environment. +type Environment struct { manager.Manager client.Client Config *rest.Config @@ -122,11 +121,11 @@ type TestEnvironment struct { cancel context.CancelFunc } -// NewTestEnvironment creates a new environment spinning up a local api-server. +// New creates a new environment spinning up a local api-server. // // This function should be called only once for each package you're running tests within, // usually the environment is initialized in a suite_test.go file within a `BeforeSuite` ginkgo block. -func NewTestEnvironment() *TestEnvironment { +func New() *Environment { // initialize webhook here to be able to test the envtest install via webhookOptions // This should set LocalServingCertDir and LocalServingPort that are used below. initializeWebhookInEnvironment() @@ -188,7 +187,7 @@ func NewTestEnvironment() *TestEnvironment { klog.Fatalf("unable to create webhook for machinepool: %+v", err) } - return &TestEnvironment{ + return &Environment{ Manager: mgr, Client: mgr.GetClient(), Config: mgr.GetConfig(), @@ -276,14 +275,14 @@ func initializeWebhookInEnvironment() { } // StartManager starts the manager. -func (t *TestEnvironment) StartManager(ctx context.Context) error { +func (t *Environment) StartManager(ctx context.Context) error { ctx, cancel := context.WithCancel(ctx) t.cancel = cancel return t.Manager.Start(ctx) } // WaitForWebhooks waits for the webhook server to be available. -func (t *TestEnvironment) WaitForWebhooks() { +func (t *Environment) WaitForWebhooks() { port := env.WebhookInstallOptions.LocalServingPort klog.V(2).Infof("Waiting for webhook port %d to be open prior to running tests", port) @@ -302,18 +301,18 @@ func (t *TestEnvironment) WaitForWebhooks() { } // Stop stops the test environment. -func (t *TestEnvironment) Stop() error { +func (t *Environment) Stop() error { t.cancel() return env.Stop() } // CreateKubeconfigSecret generates a new Kubeconfig secret from the envtest config. -func (t *TestEnvironment) CreateKubeconfigSecret(ctx context.Context, cluster *clusterv1.Cluster) error { +func (t *Environment) CreateKubeconfigSecret(ctx context.Context, cluster *clusterv1.Cluster) error { return t.Create(ctx, kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(t.Config, cluster))) } // Cleanup deletes all the given objects. -func (t *TestEnvironment) Cleanup(ctx context.Context, objs ...client.Object) error { +func (t *Environment) Cleanup(ctx context.Context, objs ...client.Object) error { errs := []error{} for _, o := range objs { err := t.Client.Delete(ctx, o) @@ -328,7 +327,7 @@ func (t *TestEnvironment) Cleanup(ctx context.Context, objs ...client.Object) er // CleanupAndWait deletes all the given objects and waits for the cache to be updated accordingly. // // NOTE: Waiting for the cache to be updated helps in preventing test flakes due to the cache sync delays. -func (t *TestEnvironment) CleanupAndWait(ctx context.Context, objs ...client.Object) error { +func (t *Environment) CleanupAndWait(ctx context.Context, objs ...client.Object) error { if err := t.Cleanup(ctx, objs...); err != nil { return err } @@ -362,7 +361,7 @@ func (t *TestEnvironment) CleanupAndWait(ctx context.Context, objs ...client.Obj // CreateAndWait creates the given object and waits for the cache to be updated accordingly. // // NOTE: Waiting for the cache to be updated helps in preventing test flakes due to the cache sync delays. 
-func (t *TestEnvironment) CreateAndWait(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { +func (t *Environment) CreateAndWait(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { if err := t.Client.Create(ctx, obj, opts...); err != nil { return err } @@ -387,7 +386,7 @@ func (t *TestEnvironment) CreateAndWait(ctx context.Context, obj client.Object, } // CreateNamespace creates a new namespace with a generated name. -func (t *TestEnvironment) CreateNamespace(ctx context.Context, generateName string) (*corev1.Namespace, error) { +func (t *Environment) CreateNamespace(ctx context.Context, generateName string) (*corev1.Namespace, error) { ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ GenerateName: fmt.Sprintf("%s-", generateName), diff --git a/util/collections/suite_test.go b/util/collections/suite_test.go index 49fb7c7fa13a..a17b58b69dc9 100644 --- a/util/collections/suite_test.go +++ b/util/collections/suite_test.go @@ -21,30 +21,30 @@ import ( "os" "testing" - "sigs.k8s.io/cluster-api/test/helpers" + "sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { // Bootstrapping test environment - testEnv = helpers.NewTestEnvironment() + env = envtest.New() go func() { - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() // Run tests code := m.Run() // Tearing down the test environment - if err := testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop the envtest: %v", err)) } diff --git a/util/patch/patch_test.go b/util/patch/patch_test.go index 5934f4fa1e30..5c5693b602d6 100644 --- a/util/patch/patch_test.go +++ b/util/patch/patch_test.go @@ -54,25 +54,25 @@ func TestPatchHelper(t *testing.T) { obj := obj.DeepCopy() t.Log("Creating the unstructured object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.GetName(), Namespace: obj.GetNamespace()} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) obj.Object["status"] = map[string]interface{}{ "ready": true, } - g.Expect(testEnv.Status().Update(ctx, obj)).To(Succeed()) + g.Expect(env.Status().Update(ctx, obj)).To(Succeed()) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Modifying the OwnerReferences") @@ -97,7 +97,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } return reflect.DeepEqual(obj.GetOwnerReferences(), objAfter.GetOwnerReferences()) @@ -121,20 +121,20 @@ func TestPatchHelper(t *testing.T) { } t.Log("Creating a Node object") - g.Expect(testEnv.Create(ctx, 
obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.GetName()} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Appending a new condition") @@ -154,7 +154,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - g.Expect(testEnv.Get(ctx, key, objAfter)).To(Succeed()) + g.Expect(env.Get(ctx, key, objAfter)).To(Succeed()) ok, _ := ContainElement(condition).Match(objAfter.Status.Conditions) return ok @@ -175,20 +175,20 @@ func TestPatchHelper(t *testing.T) { obj := obj.DeepCopy() t.Log("Creating the object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Marking Ready=True") @@ -200,7 +200,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } return cmp.Equal(obj.Status.Conditions, objAfter.Status.Conditions) @@ -213,29 +213,29 @@ func TestPatchHelper(t *testing.T) { obj := obj.DeepCopy() t.Log("Creating the object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) objCopy := obj.DeepCopy() t.Log("Marking a custom condition to be false") conditions.MarkFalse(objCopy, clusterv1.ConditionType("TestCondition"), "reason", clusterv1.ConditionSeverityInfo, "message") - g.Expect(testEnv.Status().Update(ctx, objCopy)).To(Succeed()) + g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") g.Expect(obj.ResourceVersion).NotTo(Equal(objCopy.ResourceVersion)) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Marking Ready=True") @@ -247,7 +247,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, 
objAfter); err != nil { return false } @@ -267,29 +267,29 @@ func TestPatchHelper(t *testing.T) { obj := obj.DeepCopy() t.Log("Creating the object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) objCopy := obj.DeepCopy() t.Log("Marking a custom condition to be false") conditions.MarkFalse(objCopy, clusterv1.ConditionType("TestCondition"), "reason", clusterv1.ConditionSeverityInfo, "message") - g.Expect(testEnv.Status().Update(ctx, objCopy)).To(Succeed()) + g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") g.Expect(obj.ResourceVersion).NotTo(Equal(objCopy.ResourceVersion)) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Changing the object spec, status, and adding Ready=True condition") @@ -305,7 +305,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") objAfter := obj.DeepCopy() g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } @@ -328,29 +328,29 @@ func TestPatchHelper(t *testing.T) { obj := obj.DeepCopy() t.Log("Creating the object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) objCopy := obj.DeepCopy() t.Log("Marking a custom condition to be false") conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") - g.Expect(testEnv.Status().Update(ctx, objCopy)).To(Succeed()) + g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") g.Expect(obj.ResourceVersion).NotTo(Equal(objCopy.ResourceVersion)) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Marking Ready=True") @@ -362,7 +362,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has not been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } ok, _ := ContainElement(objCopy.Status.Conditions[0]).Match(objAfter.Status.Conditions) @@ -376,29 +376,29 @@ func TestPatchHelper(t *testing.T) { obj := obj.DeepCopy() t.Log("Creating the object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, 
Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) objCopy := obj.DeepCopy() t.Log("Marking a custom condition to be false") conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") - g.Expect(testEnv.Status().Update(ctx, objCopy)).To(Succeed()) + g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") g.Expect(obj.ResourceVersion).NotTo(Equal(objCopy.ResourceVersion)) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Marking Ready=True") @@ -410,7 +410,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } @@ -427,29 +427,29 @@ func TestPatchHelper(t *testing.T) { obj := obj.DeepCopy() t.Log("Creating the object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) objCopy := obj.DeepCopy() t.Log("Marking a custom condition to be false") conditions.MarkFalse(objCopy, clusterv1.ReadyCondition, "reason", clusterv1.ConditionSeverityInfo, "message") - g.Expect(testEnv.Status().Update(ctx, objCopy)).To(Succeed()) + g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") g.Expect(obj.ResourceVersion).NotTo(Equal(objCopy.ResourceVersion)) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Marking Ready=True") @@ -461,7 +461,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } @@ -488,20 +488,20 @@ func TestPatchHelper(t *testing.T) { obj := obj.DeepCopy() t.Log("Creating the object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Adding a finalizer") @@ -513,7 +513,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err 
:= testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } @@ -528,20 +528,20 @@ func TestPatchHelper(t *testing.T) { obj.Finalizers = append(obj.Finalizers, clusterv1.ClusterFinalizer) t.Log("Creating the object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Removing the finalizers") @@ -553,7 +553,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } @@ -568,20 +568,20 @@ func TestPatchHelper(t *testing.T) { obj.ObjectMeta.Namespace = "default" t.Log("Creating the object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Updating the object spec") @@ -598,7 +598,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } @@ -613,20 +613,20 @@ func TestPatchHelper(t *testing.T) { obj := obj.DeepCopy() t.Log("Creating the object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Updating the object status") @@ -638,7 +638,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } return reflect.DeepEqual(objAfter.Status, obj.Status) @@ -652,20 +652,20 @@ func TestPatchHelper(t *testing.T) { obj.ObjectMeta.Namespace = "default" t.Log("Creating the object") - g.Expect(testEnv.Create(ctx, 
obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Updating the object spec") @@ -688,7 +688,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } @@ -721,20 +721,20 @@ func TestPatchHelper(t *testing.T) { obj := obj.DeepCopy() t.Log("Creating the MachineSet object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Updating the object spec") @@ -746,7 +746,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } @@ -761,20 +761,20 @@ func TestPatchHelper(t *testing.T) { obj := obj.DeepCopy() t.Log("Creating the MachineSet object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Updating the object spec") @@ -795,7 +795,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } @@ -811,24 +811,24 @@ func TestPatchHelper(t *testing.T) { obj := obj.DeepCopy() t.Log("Creating the MachineSet object") - g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + g.Expect(env.Create(ctx, obj)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + g.Expect(env.Delete(ctx, obj)).To(Succeed()) }() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} t.Log("Checking that the object has been created") g.Eventually(func() error { obj := obj.DeepCopy() - return 
testEnv.Get(ctx, key, obj) + return env.Get(ctx, key, obj) }).Should(Succeed()) obj.Status.ObservedGeneration = obj.GetGeneration() lastGeneration := obj.GetGeneration() - g.Expect(testEnv.Status().Update(ctx, obj)) + g.Expect(env.Status().Update(ctx, obj)) t.Log("Creating a new patch helper") - patcher, err := NewHelper(obj, testEnv) + patcher, err := NewHelper(obj, env) g.Expect(err).NotTo(HaveOccurred()) t.Log("Patching the object") @@ -837,7 +837,7 @@ func TestPatchHelper(t *testing.T) { t.Log("Validating the object has been updated") g.Eventually(func() bool { objAfter := obj.DeepCopy() - if err := testEnv.Get(ctx, key, objAfter); err != nil { + if err := env.Get(ctx, key, objAfter); err != nil { return false } return lastGeneration == objAfter.Status.ObservedGeneration @@ -870,16 +870,16 @@ func TestPatchHelper(t *testing.T) { }, } - g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) + g.Expect(env.Create(ctx, cluster)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, cluster)).To(Succeed()) + g.Expect(env.Delete(ctx, cluster)).To(Succeed()) }() - g.Expect(testEnv.Create(ctx, machineSet)).To(Succeed()) + g.Expect(env.Create(ctx, machineSet)).To(Succeed()) defer func() { - g.Expect(testEnv.Delete(ctx, machineSet)).To(Succeed()) + g.Expect(env.Delete(ctx, machineSet)).To(Succeed()) }() - patcher, err := NewHelper(cluster, testEnv) + patcher, err := NewHelper(cluster, env) g.Expect(err).NotTo(HaveOccurred()) g.Expect(patcher.Patch(ctx, machineSet)).NotTo(Succeed()) diff --git a/util/patch/suite_test.go b/util/patch/suite_test.go index 605f17d34794..cab7a12c5613 100644 --- a/util/patch/suite_test.go +++ b/util/patch/suite_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - "sigs.k8s.io/cluster-api/test/helpers" + "sigs.k8s.io/cluster-api/internal/envtest" ctrl "sigs.k8s.io/controller-runtime" // +kubebuilder:scaffold:imports ) @@ -32,27 +32,27 @@ const ( ) var ( - testEnv *helpers.TestEnvironment - ctx = ctrl.SetupSignalHandler() + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { fmt.Println("Creating new test environment") - testEnv = helpers.NewTestEnvironment() + env = envtest.New() go func() { fmt.Println("Starting the manager") - if err := testEnv.StartManager(ctx); err != nil { + if err := env.StartManager(ctx); err != nil { panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) } }() - <-testEnv.Manager.Elected() - testEnv.WaitForWebhooks() + <-env.Manager.Elected() + env.WaitForWebhooks() code := m.Run() fmt.Println("Tearing down test suite") - if err := testEnv.Stop(); err != nil { + if err := env.Stop(); err != nil { panic(fmt.Sprintf("Failed to stop envtest: %v", err)) }
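
For reference, the suite bootstrap that this patch repeats across the test packages now follows the sketch below. This is an illustrative, condensed example rather than a file from the patch; it assumes only the exported surface the diffs above rely on (envtest.New, Environment.StartManager, Manager.Elected, WaitForWebhooks, Stop), and the package name is hypothetical. Since sigs.k8s.io/cluster-api/internal/envtest is an internal package, this pattern is only usable by suites inside the Cluster API module, which is the stated intent of the move.

package example_test // hypothetical package name

import (
	"fmt"
	"os"
	"testing"

	"sigs.k8s.io/cluster-api/internal/envtest"
	ctrl "sigs.k8s.io/controller-runtime"
)

var (
	env *envtest.Environment
	ctx = ctrl.SetupSignalHandler()
)

func TestMain(m *testing.M) {
	// Bootstrap the shared test environment for the whole package.
	env = envtest.New()

	// Start the manager in the background; it serves the webhooks used by the tests.
	go func() {
		if err := env.StartManager(ctx); err != nil {
			panic(fmt.Sprintf("Failed to start the envtest manager: %v", err))
		}
	}()

	// Wait until the manager is elected and the webhook endpoints respond.
	<-env.Manager.Elected()
	env.WaitForWebhooks()

	// Run the package's tests against the shared environment.
	code := m.Run()

	// Tear down the test environment before exiting.
	if err := env.Stop(); err != nil {
		panic(fmt.Sprintf("Failed to stop the envtest: %v", err))
	}
	os.Exit(code)
}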
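
The util/patch diffs above all exercise the same flow against that shared Environment: create an object through env, wait for it to be readable, build a helper with NewHelper(obj, env), mutate the object, then Patch and verify with Eventually. The stand-alone sketch below illustrates that flow under stated assumptions; the test name and package are hypothetical, env and ctx are assumed to come from a TestMain like the one sketched above, and it is not code from this patch.

package example_test // hypothetical; the real tests live in package patch

import (
	"testing"

	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func TestPatchHelperSketch(t *testing.T) { // hypothetical test
	g := NewWithT(t)

	obj := &clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "test-",
			Namespace:    "default",
		},
	}

	// Create the object through the shared envtest client and clean up afterwards.
	g.Expect(env.Create(ctx, obj)).To(Succeed())
	defer func() {
		g.Expect(env.Delete(ctx, obj)).To(Succeed())
	}()

	// Wait until the object is readable through the client.
	key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
	g.Eventually(func() error {
		return env.Get(ctx, key, obj.DeepCopy())
	}).Should(Succeed())

	// The helper takes the Environment directly, since it satisfies client.Client.
	patcher, err := patch.NewHelper(obj, env)
	g.Expect(err).NotTo(HaveOccurred())

	// Mutate the object locally, then persist only the computed diff.
	conditions.MarkTrue(obj, clusterv1.ReadyCondition)
	g.Expect(patcher.Patch(ctx, obj)).To(Succeed())

	// Verify the change is observable from the API server.
	g.Eventually(func() bool {
		objAfter := obj.DeepCopy()
		if err := env.Get(ctx, key, objAfter); err != nil {
			return false
		}
		return conditions.IsTrue(objAfter, clusterv1.ReadyCondition)
	}).Should(BeTrue())
}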