diff --git a/pkg/awsiamauth/reconciler/reconciler_test.go b/pkg/awsiamauth/reconciler/reconciler_test.go
index 5efc0d2ff3a3..5fcb6bc2257d 100644
--- a/pkg/awsiamauth/reconciler/reconciler_test.go
+++ b/pkg/awsiamauth/reconciler/reconciler_test.go
@@ -206,7 +206,7 @@ func TestReconcileKCPObjectNotFound(t *testing.T) {
 	r := reconciler.New(certs, generateUUID, cl, remoteClientRegistry)
 	result, err := r.Reconcile(ctx, nullLog(), cluster)
 	g.Expect(err).ToNot(HaveOccurred())
-	g.Expect(result).To(Equal(controller.ResultWithRequeue(5 * time.Second)))
+	g.Expect(result).To(Equal(controller.ResultWithRequeue(4 * time.Second)))
 }
 
 func TestReconcileRemoteGetClientError(t *testing.T) {
diff --git a/pkg/controller/clusters/clusterapi.go b/pkg/controller/clusters/clusterapi.go
index 69f635da0312..c0e0e650941f 100644
--- a/pkg/controller/clusters/clusterapi.go
+++ b/pkg/controller/clusters/clusterapi.go
@@ -25,7 +25,7 @@ func CheckControlPlaneReady(ctx context.Context, client client.Client, log logr.
 
 	if kcp == nil {
 		log.Info("KCP does not exist yet, requeuing")
-		return controller.ResultWithRequeue(5 * time.Second), nil
+		return controller.ResultWithRequeue(4 * time.Second), nil
 	}
 
 	// We make sure to check that the status is up to date before using it
diff --git a/pkg/controller/clusters/clusterapi_test.go b/pkg/controller/clusters/clusterapi_test.go
index 16fe10419e13..9f4a13eb2c16 100644
--- a/pkg/controller/clusters/clusterapi_test.go
+++ b/pkg/controller/clusters/clusterapi_test.go
@@ -55,7 +55,7 @@ func TestCheckControlPlaneReadyNoKcp(t *testing.T) {
 	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
 	g.Expect(err).NotTo(HaveOccurred())
 	g.Expect(result).To(Equal(
-		controller.Result{Result: &controllerruntime.Result{RequeueAfter: 5 * time.Second}}),
+		controller.Result{Result: &controllerruntime.Result{RequeueAfter: 4 * time.Second}}),
 	)
 }
 
diff --git a/pkg/providers/docker/reconciler/reconciler_test.go b/pkg/providers/docker/reconciler/reconciler_test.go
index 437c1f7bef93..c549e2d9aedf 100644
--- a/pkg/providers/docker/reconciler/reconciler_test.go
+++ b/pkg/providers/docker/reconciler/reconciler_test.go
@@ -14,6 +14,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/utils/pointer"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
 	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
@@ -38,9 +39,7 @@ const (
 	clusterNamespace = "test-namespace"
 )
 
-func TestReconcilerReconcileSuccess(t *testing.T) {
-	t.Skip("Flaky (https://github.com/aws/eks-anywhere/issues/6996)")
-
+func TestDockerReconcilerReconcileSuccess(t *testing.T) {
 	tt := newReconcilerTest(t)
 
 	logger := test.NewNullLogger()
@@ -476,14 +475,17 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 		c.Spec.EksaVersion = &version
 	})
 
+	kcpVersion := "v1.19.8"
 	kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
 		kcp.Name = cluster.Name
+		kcp.ObjectMeta.Generation = 2
 		kcp.Spec = controlplanev1.KubeadmControlPlaneSpec{
 			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
 				InfrastructureRef: corev1.ObjectReference{
 					Name: fmt.Sprintf("%s-control-plane-1", cluster.Name),
 				},
 			},
+			Version: kcpVersion,
 		}
 		kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
 			Conditions: clusterv1.Conditions{
@@ -494,6 +496,8 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 				},
 			},
 			ObservedGeneration: 2,
+			Ready:              true,
+			Version:            pointer.String(kcpVersion),
 		}
 	})
 
diff --git a/pkg/providers/snow/reconciler/reconciler_test.go b/pkg/providers/snow/reconciler/reconciler_test.go
index 6567487f190a..32700678ecb3 100644
--- a/pkg/providers/snow/reconciler/reconciler_test.go
+++ b/pkg/providers/snow/reconciler/reconciler_test.go
@@ -38,9 +38,7 @@ const (
 	clusterNamespace = "test-namespace"
 )
 
-func TestReconcilerReconcileSuccess(t *testing.T) {
-	t.Skip("Flaky (https://github.com/aws/eks-anywhere/issues/6996)")
-
+func TestSnowReconcilerReconcileSuccess(t *testing.T) {
 	tt := newReconcilerTest(t)
 	// We want to check that the cluster status is cleaned up if validations are passed
 	tt.cluster.SetFailure(anywherev1.FailureReasonType("InvalidCluster"), "invalid cluster")
@@ -241,7 +239,9 @@ func TestReconcilerCheckControlPlaneReadyItIsReady(t *testing.T) {
 				LastTransitionTime: metav1.NewTime(time.Now()),
 			},
 		},
-		Version: pointer.String(kcpVersion),
+		ObservedGeneration: 2,
+		Version:            pointer.String(kcpVersion),
+		Ready:              true,
 	}
 	tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.kcp)
 	tt.withFakeClient()
@@ -398,14 +398,17 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 		c.Spec.EksaVersion = &version
 	})
 
+	kcpVersion := "v1.19.8"
 	kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
 		kcp.Name = cluster.Name
+		kcp.ObjectMeta.Generation = 2
 		kcp.Spec = controlplanev1.KubeadmControlPlaneSpec{
 			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
 				InfrastructureRef: corev1.ObjectReference{
 					Name: fmt.Sprintf("%s-control-plane-1", cluster.Name),
 				},
 			},
+			Version: kcpVersion,
 		}
 		kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
 			Conditions: clusterv1.Conditions{
@@ -416,6 +419,8 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 				},
 			},
 			ObservedGeneration: 2,
+			Ready:              true,
+			Version:            pointer.String(kcpVersion),
 		}
 	})
 
diff --git a/pkg/providers/tinkerbell/reconciler/reconciler_test.go b/pkg/providers/tinkerbell/reconciler/reconciler_test.go
index 4efa36d966b9..18cb56114331 100644
--- a/pkg/providers/tinkerbell/reconciler/reconciler_test.go
+++ b/pkg/providers/tinkerbell/reconciler/reconciler_test.go
@@ -14,6 +14,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/utils/pointer"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
 	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
@@ -55,9 +56,7 @@ func TestReconcilerGenerateSpec(t *testing.T) {
 	tt.cleanup()
 }
 
-func TestReconcilerReconcileSuccess(t *testing.T) {
-	t.Skip("Flaky (https://github.com/aws/eks-anywhere/issues/6996)")
-
+func TestTinkerbellReconcilerReconcileSuccess(t *testing.T) {
 	tt := newReconcilerTest(t)
 
 	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
@@ -937,15 +936,17 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 		c.Spec.EksaVersion = &version
 	})
 
+	kcpVersion := "v1.19.8"
 	kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
 		kcp.Name = cluster.Name
+		kcp.ObjectMeta.Generation = 2
 		kcp.Spec = controlplanev1.KubeadmControlPlaneSpec{
 			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
 				InfrastructureRef: corev1.ObjectReference{
 					Name: fmt.Sprintf("%s-control-plane-1", cluster.Name),
 				},
 			},
-			Version:  "v1.19.8",
+			Version:  kcpVersion,
 			Replicas: ptr.Int32(1),
 		}
 		kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
@@ -957,6 +958,8 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 				},
 			},
 			ObservedGeneration: 2,
+			Ready:              true,
+			Version:            pointer.String(kcpVersion),
 		}
 	})
 
diff --git a/pkg/providers/vsphere/reconciler/reconciler_test.go b/pkg/providers/vsphere/reconciler/reconciler_test.go
index 70970e166910..21ec02f1c268 100644
--- a/pkg/providers/vsphere/reconciler/reconciler_test.go
+++ b/pkg/providers/vsphere/reconciler/reconciler_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/pointer"
 	vspherev1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
@@ -45,9 +46,7 @@ const (
 	clusterNamespace = "test-namespace"
 )
 
-func TestReconcilerReconcileSuccess(t *testing.T) {
-	t.Skip("Flaky (https://github.com/aws/eks-anywhere/issues/6996)")
-
+func TestVSphereReconcilerReconcileSuccess(t *testing.T) {
 	tt := newReconcilerTest(t)
 	// We want to check that the cluster status is cleaned up if validations are passed
 	tt.cluster.SetFailure(anywherev1.FailureReasonType("InvalidCluster"), "invalid cluster")
@@ -165,10 +164,9 @@ func TestSetupEnvVars(t *testing.T) {
 	tt.Expect(err).To(BeNil())
 }
 
-func TestReconcilerControlPlaneIsNotReady(t *testing.T) {
-	t.Skip("Flaky (https://github.com/aws/eks-anywhere/issues/7000)")
-
+func TestVSphereReconcilerControlPlaneIsNotReady(t *testing.T) {
 	tt := newReconcilerTest(t)
+	kcpVersion := "v1.19.8"
 	tt.kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
 		Conditions: clusterv1.Conditions{
 			{
@@ -178,6 +176,8 @@ func TestReconcilerControlPlaneIsNotReady(t *testing.T) {
 			},
 		},
 		ObservedGeneration: 2,
+		Ready:              true,
+		Version:            pointer.String(kcpVersion),
 	}
 
 	tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.kcp)
@@ -440,14 +440,17 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 		c.Spec.EksaVersion = &version
 	})
 
+	kcpVersion := "v1.19.8"
 	kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
 		kcp.Name = cluster.Name
+		kcp.ObjectMeta.Generation = 2
 		kcp.Spec = controlplanev1.KubeadmControlPlaneSpec{
 			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
 				InfrastructureRef: corev1.ObjectReference{
 					Name: fmt.Sprintf("%s-control-plane-1", cluster.Name),
 				},
 			},
+			Version: kcpVersion,
 		}
 		kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
 			Conditions: clusterv1.Conditions{
@@ -458,6 +461,8 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 				},
 			},
 			ObservedGeneration: 2,
+			Ready:              true,
+			Version:            pointer.String(kcpVersion),
 		}
 	})
 