From e43402d54d6e2b59af21ff1b42235c01123fce9e Mon Sep 17 00:00:00 2001
From: Javier Cano Cano
Date: Mon, 29 Jan 2024 13:00:35 +0100
Subject: [PATCH] fix: `Eventually()` missing `Should()` statement

The linter enforces the usage of `Should()` statements when an
`Eventually` check is used.

Also, `DeferCleanup` has been dropped in favor of `AfterEach`.
[`resetToDefaultConfig()`](https://github.com/kubevirt/kubevirt/blob/cb1b6e53540189d6664c4a8c126ab6e0a84ff8c4/tests/utils.go#L1842)
is called before the test's `DeferCleanup`, producing the following
error:

"resource & config versions (5548 and 4736 respectively) are not as
expected. component: \"virt-handler\", pod: \"virt-handler-zdv7f\""

This happens because `virt-handler` is intentionally left unready by
the test, and `resetToDefaultConfig()` forces `virt-handler` to
reconcile. The reconciliation fails, but the `virt-handler`
`resourceVersion` is updated anyway, so the KubeVirt object ends up
out of sync.

Signed-off-by: Javier Cano Cano
---
 tests/vm_test.go | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/tests/vm_test.go b/tests/vm_test.go
index 947b55cfe5d8..02b801796c63 100644
--- a/tests/vm_test.go
+++ b/tests/vm_test.go
@@ -1957,6 +1957,17 @@ status:
 
 	Context("[Serial] when node becomes unhealthy", Serial, func() {
 		const componentName = "virt-handler"
+		var nodeName string
+
+		AfterEach(func() {
+			libpod.DeleteKubernetesApiBlackhole(getHandlerNodePod(virtClient, nodeName), componentName)
+			Eventually(func(g Gomega) {
+				g.Expect(getHandlerNodePod(virtClient, nodeName).Items[0]).To(HaveConditionTrue(k8sv1.PodReady))
+			}, 120*time.Second, time.Second).Should(Succeed())
+
+			tests.WaitForConfigToBePropagatedToComponent("kubevirt.io=virt-handler", util.GetCurrentKv(virtClient).ResourceVersion,
+				tests.ExpectResourceVersionToBeLessEqualThanConfigVersion, 120*time.Second)
+		})
 
 		It("[Serial] the VMs running in that node should be respawned", func() {
 			By("Starting VM")
@@ -1964,17 +1975,14 @@ status:
 			vmi, err := virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, &k8smetav1.GetOptions{})
 			Expect(err).ToNot(HaveOccurred())
 
-			nodeName := vmi.Status.NodeName
+			nodeName = vmi.Status.NodeName
 			oldUID := vmi.UID
 
 			By("Blocking virt-handler from reconciling the VMI")
 			libpod.AddKubernetesApiBlackhole(getHandlerNodePod(virtClient, nodeName), componentName)
-			Eventually(getHandlerNodePod(virtClient, nodeName).Items[0], 120*time.Second, time.Second, HaveConditionFalse(k8sv1.PodReady))
-
-			DeferCleanup(func() {
-				libpod.DeleteKubernetesApiBlackhole(getHandlerNodePod(virtClient, nodeName), componentName)
-				Eventually(getHandlerNodePod(virtClient, nodeName).Items[0], 120*time.Second, time.Second, HaveConditionTrue(k8sv1.PodReady))
-			})
+			Eventually(func(g Gomega) {
+				g.Expect(getHandlerNodePod(virtClient, nodeName).Items[0]).To(HaveConditionFalse(k8sv1.PodReady))
+			}, 120*time.Second, time.Second).Should(Succeed())
 
 			pod, err := libvmi.GetPodByVirtualMachineInstance(vmi, vmi.Namespace)
 			Expect(err).ToNot(HaveOccurred())
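
For reviewers unfamiliar with the pattern, here is a minimal, self-contained
sketch of the Gomega polling form the patch switches to. `getPod` and
`podReady` are hypothetical stand-ins for `getHandlerNodePod` and the
`HaveConditionTrue`/`HaveConditionFalse` matchers; everything else is stock
Ginkgo/Gomega.

```go
package example_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

type fakePod struct{ ready bool }

// Hypothetical helpers standing in for the real cluster lookups.
func getPod() fakePod         { return fakePod{ready: true} }
func podReady(p fakePod) bool { return p.ready }

var _ = Describe("virt-handler readiness", func() {
	It("polls until the pod reports ready", func() {
		// The buggy form, Eventually(value, timeout, interval, matcher),
		// builds an async assertion but never runs it: without a
		// terminating Should()/ShouldNot() nothing is polled or asserted.
		// ginkgolinter flags exactly that mistake.
		//
		// The fixed form passes a func(g Gomega): each attempt re-fetches
		// state and asserts via g.Expect; a failing assertion makes that
		// attempt fail and Eventually retries until the 120s timeout.
		Eventually(func(g Gomega) {
			g.Expect(podReady(getPod())).To(BeTrue())
		}, 120*time.Second, time.Second).Should(Succeed())
	})
})
```

The `DeferCleanup`-to-`AfterEach` move is about ordering: as described above,
the suite-level `resetToDefaultConfig()` ran before the test's `DeferCleanup`,
so the API blackhole was still in place when the config reset tried to
propagate. The `AfterEach`, together with the explicit
`WaitForConfigToBePropagatedToComponent` call, restores connectivity and waits
for the handler to catch up first.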