diff --git a/tests/vm_test.go b/tests/vm_test.go
index 947b55cfe5d8..056c54c1a203 100644
--- a/tests/vm_test.go
+++ b/tests/vm_test.go
@@ -1969,12 +1969,25 @@ status:
 			By("Blocking virt-handler from reconciling the VMI")
 			libpod.AddKubernetesApiBlackhole(getHandlerNodePod(virtClient, nodeName), componentName)
-			Eventually(getHandlerNodePod(virtClient, nodeName).Items[0], 120*time.Second, time.Second, HaveConditionFalse(k8sv1.PodReady))
+			Eventually(func() k8sv1.Pod {
+				return getHandlerNodePod(virtClient, nodeName).Items[0]
+			}, 120*time.Second, time.Second).Should(HaveConditionFalse(k8sv1.PodReady))
 
-			DeferCleanup(func() {
+			defer func() {
 				libpod.DeleteKubernetesApiBlackhole(getHandlerNodePod(virtClient, nodeName), componentName)
-				Eventually(getHandlerNodePod(virtClient, nodeName).Items[0], 120*time.Second, time.Second, HaveConditionTrue(k8sv1.PodReady))
-			})
+				Eventually(func() k8sv1.Pod {
+					return getHandlerNodePod(virtClient, nodeName).Items[0]
+				}, 120*time.Second, time.Second).Should(HaveConditionTrue(k8sv1.PodReady))
+
+				// FIXME: this is just a test to see if the flakiness is reduced
+				migrationBandwidth := resource.MustParse("1Mi")
+				kv := util.GetCurrentKv(virtClient)
+				kv.Spec.Configuration.MigrationConfiguration = &v1.MigrationConfiguration{
+					BandwidthPerMigration: &migrationBandwidth,
+				}
+				kv = testsuite.UpdateKubeVirtConfigValue(kv.Spec.Configuration)
+				tests.WaitForConfigToBePropagatedToComponent("kubevirt.io=virt-handler", kv.ResourceVersion, tests.ExpectResourceVersionToBeLessEqualThanConfigVersion, 120*time.Second)
+			}()
 
 			pod, err := libvmi.GetPodByVirtualMachineInstance(vmi, vmi.Namespace)
 			Expect(err).ToNot(HaveOccurred())
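For context (not part of the patch): the hunk replaces a one-shot assertion on a pod snapshot with Gomega's polling form, where `Eventually` is handed a function so the virt-handler pod is re-fetched on every poll rather than evaluated once. The sketch below illustrates that pattern in isolation; `fetchHandlerPod` and the inline readiness check are hypothetical stand-ins for `getHandlerNodePod(virtClient, nodeName).Items[0]` and the suite's `HaveConditionFalse` matcher.

```go
package sketch

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	k8sv1 "k8s.io/api/core/v1"
)

// fetchHandlerPod is a hypothetical stand-in for
// getHandlerNodePod(virtClient, nodeName).Items[0] in the real test.
func fetchHandlerPod() k8sv1.Pod {
	// A real implementation would list the virt-handler pod on the node.
	return k8sv1.Pod{}
}

var _ = It("waits for the handler pod to report NotReady", func() {
	// Because Eventually receives a function, it re-invokes it every
	// second for up to 120s, so the matcher always sees fresh conditions.
	Eventually(fetchHandlerPod, 120*time.Second, time.Second).Should(
		WithTransform(func(p k8sv1.Pod) bool {
			for _, c := range p.Status.Conditions {
				if c.Type == k8sv1.PodReady {
					return c.Status == k8sv1.ConditionFalse
				}
			}
			return false
		}, BeTrue()),
	)
})
```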