From 32739831cdd8041004303c64858256137adee710 Mon Sep 17 00:00:00 2001 From: Warren Fernandes Date: Fri, 18 Sep 2020 16:35:52 -0600 Subject: [PATCH] Use single testEnv for ginkgo and go tests --- .../machinehealthcheck_controller_test.go | 162 +++++++++--------- controllers/suite_test.go | 142 +++++---------- 2 files changed, 121 insertions(+), 183 deletions(-) diff --git a/controllers/machinehealthcheck_controller_test.go b/controllers/machinehealthcheck_controller_test.go index a6809bcabb7f..5638e348dada 100644 --- a/controllers/machinehealthcheck_controller_test.go +++ b/controllers/machinehealthcheck_controller_test.go @@ -116,17 +116,17 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { ctx := context.TODO() cluster, mhc := setup(g) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) mhc.Spec.ClusterName = cluster.Name mhc.Labels = map[string]string{} mhc.SetGenerateName("correct-cluster-name-label-with-no-existing-labels-") - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) g.Eventually(func() map[string]string { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -139,7 +139,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { ctx := context.TODO() cluster, mhc := setup(g) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) mhc.Spec.ClusterName = cluster.Name @@ -148,10 +148,10 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } mhc.SetGenerateName("correct-cluster-name-label-when-has-wrong-cluster-label-") - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) g.Eventually(func() map[string]string { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -164,7 +164,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { ctx := context.TODO() cluster, mhc := setup(g) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) mhc.Spec.ClusterName = cluster.Name @@ -173,10 +173,10 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } mhc.SetGenerateName("correct-cluster-name-label-when-other-labels-present-") - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) g.Eventually(func() map[string]string { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -193,17 +193,17 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { ctx := context.TODO() cluster, mhc := setup(g) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) mhc.Spec.ClusterName = cluster.Name mhc.OwnerReferences = []metav1.OwnerReference{} mhc.SetGenerateName("owner-reference-when-no-existing-ones-exist-") - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) g.Eventually(func() []metav1.OwnerReference { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { 
fmt.Printf("error cannot retrieve mhc in ctx: %v", err) return nil @@ -220,7 +220,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { ctx := context.TODO() cluster, mhc := setup(g) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) mhc.Spec.ClusterName = cluster.Name @@ -229,10 +229,10 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } mhc.SetGenerateName("owner-reference-when-modifying-existing-ones-") - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) g.Eventually(func() []metav1.OwnerReference { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -250,13 +250,13 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { ctx := context.TODO() cluster, mhc := setup(g) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) mhc.Spec.ClusterName = cluster.Name mhc.SetGenerateName("all-machines-healthy-") - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) // Healthy nodes and machines. _, machines, cleanup := fakeNodesMachines(g, 2, true, true, mhc, cluster) @@ -267,7 +267,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -285,12 +285,12 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { ctx := context.TODO() cluster, mhc := setup(g) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) mhc.Spec.ClusterName = cluster.Name mhc.SetGenerateName("one-unhealthy-machine-") - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) // Healthy nodes and machines. _, machines, cleanup1 := fakeNodesMachines(g, 2, true, true, mhc, cluster) @@ -306,7 +306,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -324,7 +324,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { ctx := context.TODO() cluster, mhc := setup(g) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) mhc.Spec.ClusterName = cluster.Name @@ -332,7 +332,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc.Spec.MaxUnhealthy = &maxUnhealthy mhc.SetGenerateName("unhealthy-machines-exceed-maxunhealthy-") - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) // Healthy nodes and machines. _, machines, cleanup1 := fakeNodesMachines(g, 1, true, true, mhc, cluster) @@ -348,7 +348,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. 
g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -363,7 +363,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -381,7 +381,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been remediated. g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -402,14 +402,14 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { ctx := context.TODO() cluster, mhc := setup(g) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) mhc.Spec.ClusterName = cluster.Name mhc.SetGenerateName("no-noderef-for-less-than-the-nodestartuptimeout-") mhc.Spec.NodeStartupTimeout = &metav1.Duration{Duration: 5 * time.Hour} - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) // Healthy nodes and machines. _, machines, cleanup1 := fakeNodesMachines(g, 2, true, true, mhc, cluster) @@ -425,7 +425,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -440,7 +440,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -458,7 +458,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been remediated. g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -484,9 +484,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc.Spec.NodeStartupTimeout = &metav1.Duration{Duration: time.Second} mhc.SetGenerateName("no-noderef-for-longer-than-the-nodestartuptimeout-") - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -505,7 +505,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the MHC status matches. We have two healthy machines and // one unhealthy. 
g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { fmt.Printf("error retrieving mhc: %v", err) return nil @@ -521,7 +521,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -540,7 +540,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been remediated. g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -562,13 +562,13 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { ctx := context.TODO() cluster, mhc := setup(g) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) mhc.Spec.ClusterName = cluster.Name mhc.SetGenerateName("node-gone-away-") - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) // Healthy nodes and machines. nodes, machines, cleanup := fakeNodesMachines(g, 3, true, true, mhc, cluster) @@ -581,15 +581,15 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Forcibly remove the last machine's node. g.Eventually(func() bool { nodeToBeRemoved := nodes[2] - if err := testEnv1.Delete(ctx, nodeToBeRemoved); err != nil { + if err := testEnv.Delete(ctx, nodeToBeRemoved); err != nil { return apierrors.IsNotFound(err) } - return apierrors.IsNotFound(testEnv1.Get(ctx, util.ObjectKey(nodeToBeRemoved), nodeToBeRemoved)) + return apierrors.IsNotFound(testEnv.Get(ctx, util.ObjectKey(nodeToBeRemoved), nodeToBeRemoved)) }).Should(BeTrue()) // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -604,7 +604,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -622,7 +622,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been remediated. 
g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -643,13 +643,13 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { ctx := context.TODO() cluster, mhc := setup(g) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) mhc.Spec.ClusterName = cluster.Name mhc.SetGenerateName("reach-node-transition-unhealthy-") - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) // Healthy nodes and machines. nodes, machines, cleanup := fakeNodesMachines(g, 1, true, true, mhc, cluster) @@ -661,7 +661,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -683,11 +683,11 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { LastTransitionTime: metav1.NewTime(time.Now().Add(-10 * time.Minute)), }, } - g.Expect(testEnv1.Status().Patch(ctx, node, nodePatch)).To(Succeed()) + g.Expect(testEnv.Status().Patch(ctx, node, nodePatch)).To(Succeed()) // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -702,7 +702,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -720,7 +720,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been remediated. g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -761,7 +761,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { infraTmpl.SetGenerateName("mhc-ms-template-") infraTmpl.SetNamespace(mhc.Namespace) - g.Expect(testEnv1.Create(ctx, infraTmpl)).To(Succeed()) + g.Expect(testEnv.Create(ctx, infraTmpl)).To(Succeed()) machineSet := &clusterv1.MachineSet{ ObjectMeta: metav1.ObjectMeta{ @@ -791,12 +791,12 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { }, } machineSet.Default() - g.Expect(testEnv1.Create(ctx, machineSet)).To(Succeed()) + g.Expect(testEnv.Create(ctx, machineSet)).To(Succeed()) // Calculate how many Machines have health check succeeded = false. 
g.Eventually(func() int { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -809,10 +809,10 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc.Spec.ClusterName = cluster.Name mhc.Spec.NodeStartupTimeout = &metav1.Duration{Duration: time.Second} mhc.SetGenerateName("unhealthy-machines-deleted-") - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) // defer cleanup for all the objects that have been created defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc, infraTmpl, machineSet) // Pause the MachineSet reconciler. WHY?? I SEE WHAT IS HAPPENING BUT @@ -821,13 +821,13 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { machineSet.Annotations = map[string]string{ clusterv1.PausedAnnotation: "", } - g.Expect(testEnv1.Patch(ctx, machineSet, machineSetPatch)).To(Succeed()) + g.Expect(testEnv.Patch(ctx, machineSet, machineSetPatch)).To(Succeed()) // Calculate how many Machines should be remediated. var unhealthyMachine *clusterv1.Machine g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -846,12 +846,12 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Unpause the MachineSet reconciler. machineSetPatch = client.MergeFrom(machineSet.DeepCopy()) delete(machineSet.Annotations, clusterv1.PausedAnnotation) - g.Expect(testEnv1.Patch(ctx, machineSet, machineSetPatch)).To(Succeed()) + g.Expect(testEnv.Patch(ctx, machineSet, machineSetPatch)).To(Succeed()) // Make sure the Machine gets deleted. g.Eventually(func() bool { machine := unhealthyMachine.DeepCopy() - err := testEnv1.Get(ctx, util.ObjectKey(unhealthyMachine), machine) + err := testEnv.Get(ctx, util.ObjectKey(unhealthyMachine), machine) return apierrors.IsNotFound(err) || !machine.DeletionTimestamp.IsZero() }, timeout, time.Second) }) @@ -864,9 +864,9 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc.Spec.ClusterName = cluster.Name - g.Expect(testEnv1.Create(ctx, mhc)).To(Succeed()) + g.Expect(testEnv.Create(ctx, mhc)).To(Succeed()) defer func(do ...runtime.Object) { - g.Expect(testEnv1.Cleanup(ctx, do...)).To(Succeed()) + g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) }(cluster, mhc) // Healthy nodes and machines. @@ -879,7 +879,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -896,7 +896,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { machines[0].Annotations = map[string]string{ clusterv1.PausedAnnotation: "", } - g.Expect(testEnv1.Patch(ctx, machines[0], machinePatch)).To(Succeed()) + g.Expect(testEnv.Patch(ctx, machines[0], machinePatch)).To(Succeed()) // Transition the node to unhealthy. 
node := nodes[0] @@ -908,11 +908,11 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { LastTransitionTime: metav1.NewTime(time.Now().Add(-10 * time.Minute)), }, } - g.Expect(testEnv1.Status().Patch(ctx, node, nodePatch)).To(Succeed()) + g.Expect(testEnv.Status().Patch(ctx, node, nodePatch)).To(Succeed()) // Make sure the status matches. g.Eventually(func() *clusterv1.MachineHealthCheckStatus { - err := testEnv1.Get(ctx, util.ObjectKey(mhc), mhc) + err := testEnv.Get(ctx, util.ObjectKey(mhc), mhc) if err != nil { return nil } @@ -927,7 +927,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have health check succeeded = false. g.Eventually(func() (unhealthy int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -945,7 +945,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { // Calculate how many Machines have been remediated. g.Eventually(func() (remediated int) { machines := &clusterv1.MachineList{} - err := testEnv1.List(ctx, machines, client.MatchingLabels{ + err := testEnv.List(ctx, machines, client.MatchingLabels{ "selector": mhc.Spec.Selector.MatchLabels["selector"], }) if err != nil { @@ -963,7 +963,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } func setup(g *WithT) (*clusterv1.Cluster, *clusterv1.MachineHealthCheck) { - ns, err := testEnv1.CreateNamespace(ctx, "test-ns") + ns, err := testEnv.CreateNamespace(ctx, "test-ns") g.Expect(err).ToNot(HaveOccurred()) cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -971,13 +971,13 @@ func setup(g *WithT) (*clusterv1.Cluster, *clusterv1.MachineHealthCheck) { Namespace: ns.Name, }, } - g.Expect(testEnv1.Create(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) g.Eventually(func() error { var cl clusterv1.Cluster - return testEnv1.Get(ctx, util.ObjectKey(cluster), &cl) + return testEnv.Get(ctx, util.ObjectKey(cluster), &cl) }, timeout, time.Second).Should(Succeed()) - g.Expect(testEnv1.CreateKubeconfigSecret(cluster)).To(Succeed()) + g.Expect(testEnv.CreateKubeconfigSecret(cluster)).To(Succeed()) mhc := &clusterv1.MachineHealthCheck{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-mhc-", @@ -1005,7 +1005,7 @@ func setup(g *WithT) (*clusterv1.Cluster, *clusterv1.MachineHealthCheck) { func ownerReferenceForCluster(ctx context.Context, g *WithT, c *clusterv1.Cluster) metav1.OwnerReference { // Fetch the cluster to populate the UID cc := &clusterv1.Cluster{} - g.Expect(testEnv1.GetClient().Get(ctx, util.ObjectKey(c), cc)).To(Succeed()) + g.Expect(testEnv.GetClient().Get(ctx, util.ObjectKey(c), cc)).To(Succeed()) return metav1.OwnerReference{ APIVersion: clusterv1.GroupVersion.String(), @@ -1036,7 +1036,7 @@ func fakeNodesMachines( if !healthy { setNodeUnhealthy(&node) } - g.Expect(testEnv1.Create(ctx, infraMachine)).To(Succeed()) + g.Expect(testEnv.Create(ctx, infraMachine)).To(Succeed()) fmt.Printf("inframachine created: %s\n", infraMachine.GetName()) machine.Spec.InfrastructureRef = corev1.ObjectReference{ APIVersion: infraMachine.GetAPIVersion(), @@ -1047,9 +1047,9 @@ func fakeNodesMachines( // the InfraMachine look like its healthy and ready. 
infraMachinePatch := client.MergeFrom(infraMachine.DeepCopy()) g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed()) - g.Expect(testEnv1.Status().Patch(ctx, infraMachine, infraMachinePatch)).To(Succeed()) + g.Expect(testEnv.Status().Patch(ctx, infraMachine, infraMachinePatch)).To(Succeed()) - g.Expect(testEnv1.Create(ctx, &machine)).To(Succeed()) + g.Expect(testEnv.Create(ctx, &machine)).To(Succeed()) fmt.Printf("machine created: %s\n", machine.GetName()) // Before moving on we want to ensure that the machine has a valid @@ -1059,7 +1059,7 @@ func fakeNodesMachines( Name: machine.GetName(), Namespace: machine.GetNamespace(), } - err := testEnv1.Get(ctx, k, &machine) + err := testEnv.Get(ctx, k, &machine) if err != nil { return nil } @@ -1070,12 +1070,12 @@ func fakeNodesMachines( machineStatus := machine.Status node.Spec.ProviderID = providerID nodeStatus := node.Status - g.Expect(testEnv1.Create(ctx, &node)).To(Succeed()) + g.Expect(testEnv.Create(ctx, &node)).To(Succeed()) fmt.Printf("node created: %s\n", node.GetName()) nodePatch := client.MergeFrom(node.DeepCopy()) node.Status = nodeStatus - g.Expect(testEnv1.Status().Patch(ctx, &node, nodePatch)).To(Succeed()) + g.Expect(testEnv.Status().Patch(ctx, &node, nodePatch)).To(Succeed()) nodes = append(nodes, &node) machinePatch := client.MergeFrom(machine.DeepCopy()) @@ -1088,7 +1088,7 @@ func fakeNodesMachines( // original time so that the patch works. lastUp := metav1.NewTime(machine.Status.LastUpdated.Add(time.Second)) machine.Status.LastUpdated = &lastUp - g.Expect(testEnv1.Status().Patch(ctx, &machine, machinePatch)).To(Succeed()) + g.Expect(testEnv.Status().Patch(ctx, &machine, machinePatch)).To(Succeed()) } machines = append(machines, &machine) @@ -1097,12 +1097,12 @@ func fakeNodesMachines( cleanup := func() { fmt.Println("Cleaning up nodes and machines") for _, node := range nodes { - if err := testEnv1.Delete(ctx, node); !apierrors.IsNotFound(err) { + if err := testEnv.Delete(ctx, node); !apierrors.IsNotFound(err) { g.Expect(err).NotTo(HaveOccurred()) } } for _, machine := range machines { - g.Expect(testEnv1.Delete(ctx, machine)).To(Succeed()) + g.Expect(testEnv.Delete(ctx, machine)).To(Succeed()) } } diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 63e60916a9b3..83dff5eb7c3b 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -39,175 +39,113 @@ import ( // +kubebuilder:scaffold:imports ) -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
-
 const (
 	timeout = time.Second * 30
 )
 
 var (
-	testEnv  *helpers.TestEnvironment
-	ctx      = context.Background()
-	testEnv1 *helpers.TestEnvironment
+	testEnv *helpers.TestEnvironment
+	ctx     = context.Background()
 )
 
 func TestMain(m *testing.M) {
 	fmt.Println("Creating new test environment")
-	testEnv1 = helpers.NewTestEnvironment()
+	testEnv = helpers.NewTestEnvironment()
+	log.SetLogger(klogr.New())
 
 	// Set up a ClusterCacheTracker and ClusterCacheReconciler to provide to controllers
 	// requiring a connection to a remote cluster
 	tracker, err := remote.NewClusterCacheTracker(
 		log.Log,
-		testEnv1.Manager,
+		testEnv.Manager,
 	)
 	if err != nil {
 		panic(fmt.Sprintf("unable to create cluster cache tracker: %v", err))
 	}
 	if err := (&remote.ClusterCacheReconciler{
-		Client:  testEnv1,
+		Client:  testEnv,
 		Log:     log.Log,
 		Tracker: tracker,
-	}).SetupWithManager(testEnv1.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
+	}).SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
 		panic(fmt.Sprintf("Failed to start ClusterCacheReconciler: %v", err))
 	}
-	// TODO: (wfernandes) Do we need these reconcilers for the MHC tests?
-	// They may be used for other tests in this package that are using testEnv
+	if err := (&ClusterReconciler{
+		Client:   testEnv,
+		Log:      log.Log.WithName("controllers").WithName("Cluster"),
+		recorder: testEnv.GetEventRecorderFor("cluster-controller"),
+	}).SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
+		panic(fmt.Sprintf("Failed to start ClusterReconciler: %v", err))
+	}
 	if err := (&MachineReconciler{
-		Client:   testEnv1,
+		Client:   testEnv,
 		Log:      log.Log,
 		Tracker:  tracker,
-		recorder: testEnv1.GetEventRecorderFor("machine-controller"),
-	}).SetupWithManager(testEnv1.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
+		recorder: testEnv.GetEventRecorderFor("machine-controller"),
+	}).SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
 		panic(fmt.Sprintf("Failed to start MachineReconciler: %v", err))
 	}
 	if err := (&MachineSetReconciler{
-		Client:   testEnv1,
+		Client:   testEnv,
 		Log:      log.Log,
 		Tracker:  tracker,
-		recorder: testEnv1.GetEventRecorderFor("machineset-controller"),
-	}).SetupWithManager(testEnv1.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
+		recorder: testEnv.GetEventRecorderFor("machineset-controller"),
+	}).SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
 		panic(fmt.Sprintf("Failed to start MMachineSetReconciler: %v", err))
 	}
-	// if err := (&MachineDeploymentReconciler{
-	// 	Client:   testEnv1,
-	// 	Log:      log.Log,
-	// 	recorder: testEnv1.GetEventRecorderFor("machinedeployment-controller"),
-	// }).SetupWithManager(testEnv1.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
-	// 	panic(fmt.Sprintf("Failed to start MMachineDeploymentReconciler: %v", err))
-	// }
+	if err := (&MachineDeploymentReconciler{
+		Client:   testEnv,
+		Log:      log.Log,
+		recorder: testEnv.GetEventRecorderFor("machinedeployment-controller"),
+	}).SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
+		panic(fmt.Sprintf("Failed to start MachineDeploymentReconciler: %v", err))
+	}
 	if err := (&MachineHealthCheckReconciler{
-		Client:   testEnv1,
+		Client:   testEnv,
 		Log:      log.Log.WithName("controllers").WithName("MachineHealthCheck"),
 		Tracker:  tracker,
-		recorder: testEnv1.GetEventRecorderFor("machinehealthcheck-controller"),
-	}).SetupWithManager(testEnv1.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
+		recorder: testEnv.GetEventRecorderFor("machinehealthcheck-controller"),
+	}).SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
 		panic(fmt.Sprintf("Failed to start MachineHealthCheckReconciler : %v", err))
 	}
 	if err := (&ClusterReconciler{
-		Client:   testEnv1,
+		Client:   testEnv,
 		Log:      log.Log.WithName("controllers").WithName("Cluster"),
-		recorder: testEnv1.GetEventRecorderFor("cluster-controller"),
-	}).SetupWithManager(testEnv1.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
+		recorder: testEnv.GetEventRecorderFor("cluster-controller"),
+	}).SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
 		panic(fmt.Sprintf("Failed to start ClusterReconciler : %v", err))
 	}
 	go func() {
-		if err := testEnv1.StartManager(); err != nil {
+		fmt.Println("Starting the manager")
+		if err := testEnv.StartManager(); err != nil {
 			panic(fmt.Sprintf("Failed to start the envtest manager: %v", err))
 		}
 	}()
 	// wait for webhook port to be open prior to running tests
-	testEnv1.WaitForWebhooks()
+	testEnv.WaitForWebhooks()
 
 	code := m.Run()
 
 	fmt.Println("Tearing down test suite")
-	if err := testEnv1.Stop(); err != nil {
+	if err := testEnv.Stop(); err != nil {
 		panic(fmt.Sprintf("Failed to stop envtest: %v", err))
 	}
 	os.Exit(code)
 }
 
+// TestAPIs runs the Ginkgo tests.
+// These tests run against the testEnv set up and torn down in TestMain.
 func TestAPIs(t *testing.T) {
 	SetDefaultEventuallyPollingInterval(100 * time.Millisecond)
 	SetDefaultEventuallyTimeout(timeout)
 	RegisterFailHandler(Fail)
 	RunSpecsWithDefaultAndCustomReporters(t,
-		"Controller Suite",
+		"Controllers Suite",
 		[]Reporter{printer.NewlineReporter{}})
 }
 
-var _ = BeforeSuite(func(done Done) {
-	By("bootstrapping test environment")
-	testEnv = helpers.NewTestEnvironment()
-	log.SetLogger(klogr.New())
-
-	// Set up a ClusterCacheTracker and ClusterCacheReconciler to provide to controllers
-	// requiring a connection to a remote cluster
-	tracker, err := remote.NewClusterCacheTracker(
-		log.Log,
-		testEnv.Manager,
-	)
-	Expect(err).ToNot(HaveOccurred())
-
-	Expect((&remote.ClusterCacheReconciler{
-		Client:  testEnv,
-		Log:     log.Log,
-		Tracker: tracker,
-	}).SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1})).To(Succeed())
-
-	clusterReconciler := &ClusterReconciler{
-		Client:   testEnv,
-		Log:      log.Log.WithName("controllers").WithName("Cluster"),
-		recorder: testEnv.GetEventRecorderFor("cluster-controller"),
-	}
-	Expect(clusterReconciler.SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1})).To(Succeed())
-	Expect((&MachineReconciler{
-		Client:   testEnv,
-		Log:      log.Log,
-		Tracker:  tracker,
-		recorder: testEnv.GetEventRecorderFor("machine-controller"),
-	}).SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1})).To(Succeed())
-	Expect((&MachineSetReconciler{
-		Client:   testEnv,
-		Log:      log.Log,
-		Tracker:  tracker,
-		recorder: testEnv.GetEventRecorderFor("machineset-controller"),
-	}).SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1})).To(Succeed())
-	Expect((&MachineDeploymentReconciler{
-		Client:   testEnv,
-		Log:      log.Log,
-		recorder: testEnv.GetEventRecorderFor("machinedeployment-controller"),
-	}).SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1})).To(Succeed())
-	Expect((&MachineHealthCheckReconciler{
-		Client:   testEnv,
-		Log:      log.Log.WithName("controllers").WithName("MachineHealthCheck"),
-		Tracker:  tracker,
-		recorder: testEnv.GetEventRecorderFor("machinehealthcheck-controller"),
-	}).SetupWithManager(testEnv.Manager, controller.Options{MaxConcurrentReconciles: 1})).To(Succeed())
-
-	By("starting the manager")
-	go func() {
-		defer GinkgoRecover()
-		Expect(testEnv.StartManager()).To(Succeed())
-	}()
-
-	// wait for webhook port to be open prior to running tests
-	testEnv.WaitForWebhooks()
-	close(done)
-}, 80)
-
-var _ = AfterSuite(func() {
-	if testEnv != nil {
-		By("tearing down the test environment")
-		Expect(testEnv.Stop()).To(Succeed())
-	}
-})
-
 func ContainRefOfGroupKind(group, kind string) types.GomegaMatcher {
 	return &refGroupKindMatcher{
 		kind:  kind,