diff --git a/tests/hotplug/BUILD.bazel b/tests/hotplug/BUILD.bazel
index 5ba3ffbbc64d..2e2cdc3012a6 100644
--- a/tests/hotplug/BUILD.bazel
+++ b/tests/hotplug/BUILD.bazel
@@ -15,6 +15,7 @@ go_library(
         "//pkg/libvmi:go_default_library",
         "//pkg/pointer:go_default_library",
         "//staging/src/kubevirt.io/api/core/v1:go_default_library",
+        "//staging/src/kubevirt.io/api/migrations/v1alpha1:go_default_library",
         "//staging/src/kubevirt.io/client-go/kubecli:go_default_library",
         "//tests:go_default_library",
         "//tests/console:go_default_library",
diff --git a/tests/hotplug/cpu.go b/tests/hotplug/cpu.go
index d8f75661b6c4..c7e7b16b83fa 100644
--- a/tests/hotplug/cpu.go
+++ b/tests/hotplug/cpu.go
@@ -8,9 +8,10 @@ import (
 
 	"kubevirt.io/kubevirt/tests/libnet"
 
+	migrationsv1 "kubevirt.io/api/migrations/v1alpha1"
+
 	"kubevirt.io/kubevirt/pkg/libvmi"
 	"kubevirt.io/kubevirt/pkg/pointer"
-
 	"kubevirt.io/kubevirt/tests/framework/checks"
 
 	"kubevirt.io/kubevirt/tests/libmigration"
@@ -38,6 +39,7 @@ import (
 	. "kubevirt.io/kubevirt/tests/framework/matcher"
 	"kubevirt.io/kubevirt/tests/libpod"
 	"kubevirt.io/kubevirt/tests/libwait"
+	testsmig "kubevirt.io/kubevirt/tests/migration"
 )
 
 var _ = Describe("[sig-compute][Serial]CPU Hotplug", decorators.SigCompute, decorators.SigComputeMigrations, decorators.RequiresTwoSchedulableNodes, decorators.VMLiveUpdateFeaturesGate, Serial, func() {
@@ -260,6 +262,85 @@ var _ = Describe("[sig-compute][Serial]CPU Hotplug", decorators.SigCompute, deco
 			Expect(reqCpu).To(Equal(expCpu.Value()))
 		})
 	})
+
+	Context("Abort CPU change", func() {
+		It("Test automated workload update", func() {
+			vmi := libvmifact.NewAlpineWithTestTooling(
+				libnet.WithMasqueradeNetworking()...,
+			)
+			vmi.Namespace = testsuite.GetTestNamespace(vmi)
+			vmi.Spec.Domain.CPU = &v1.CPU{
+				Sockets:    1,
+				Cores:      2,
+				Threads:    1,
+				MaxSockets: 2,
+			}
+			By("Limiting the bandwidth of migrations in the test namespace")
+			policy := testsmig.PreparePolicyAndVMIWithBandwidthLimitation(vmi, resource.MustParse("1Ki"))
+			testsmig.CreateMigrationPolicy(virtClient, policy)
+			Eventually(func() *migrationsv1.MigrationPolicy {
+				policy, err := virtClient.MigrationPolicy().Get(context.Background(), policy.Name, metav1.GetOptions{})
+				if err != nil {
+					return nil
+				}
+				return policy
+			}, 30*time.Second, time.Second).ShouldNot(BeNil())
+
+			vm := libvmi.NewVirtualMachine(vmi, libvmi.WithRunning())
+
+			vm, err := virtClient.VirtualMachine(vm.Namespace).Create(context.Background(), vm, metav1.CreateOptions{})
+			Expect(err).ToNot(HaveOccurred())
+			Eventually(ThisVM(vm), 360*time.Second, 1*time.Second).Should(BeReady())
+			libwait.WaitForSuccessfulVMIStart(vmi)
+
+			// Update the CPU number and trigger the workload update
+			// and migration
+			By("Enabling the second socket")
+			p, err := patch.New(patch.WithAdd("/spec/template/spec/domain/cpu/sockets", 2)).GeneratePayload()
+			Expect(err).NotTo(HaveOccurred())
+			_, err = virtClient.VirtualMachine(vm.Namespace).Patch(context.Background(), vm.Name, types.JSONPatchType, p, k8smetav1.PatchOptions{})
+			Expect(err).ToNot(HaveOccurred())
+
+			Eventually(func() bool {
+				migrations, err := virtClient.VirtualMachineInstanceMigration(vm.Namespace).List(&k8smetav1.ListOptions{})
+				Expect(err).ToNot(HaveOccurred())
+				for _, mig := range migrations.Items {
+					if mig.Spec.VMIName == vmi.Name {
+						return true
+					}
+				}
+				return false
+			}, 30*time.Second, time.Second).Should(BeTrue())
+
+			// Add annotation to cancel the workload update
+			vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, metav1.GetOptions{})
+			Expect(err).ToNot(HaveOccurred())
+			vmi.ObjectMeta.Annotations[v1.WorkloadUpdateMigrationAbortionAnnotation] = ""
+			p, err = patch.New(patch.WithAdd("/metadata/annotations", vmi.ObjectMeta.Annotations)).GeneratePayload()
+			Expect(err).ToNot(HaveOccurred())
+			_, err = virtClient.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, p, metav1.PatchOptions{})
+			Expect(err).ToNot(HaveOccurred())
+			Eventually(func() bool {
+				vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, metav1.GetOptions{})
+				Expect(err).ToNot(HaveOccurred())
+				return metav1.HasAnnotation(vmi.ObjectMeta, v1.WorkloadUpdateMigrationAbortionAnnotation)
+			}, 30*time.Second, time.Second).Should(BeTrue())
+
+			// Wait until the migration is cancelled by the workload
+			// updater
+			Eventually(func() bool {
+				migrations, err := virtClient.VirtualMachineInstanceMigration(vm.Namespace).List(&k8smetav1.ListOptions{})
+				Expect(err).ToNot(HaveOccurred())
+				for _, mig := range migrations.Items {
+					if mig.Spec.VMIName == vmi.Name {
+						return true
+					}
+				}
+				return false
+			}, 30*time.Second, time.Second).Should(BeFalse())
+
+		})
+	})
 })
 
 func patchWorkloadUpdateMethodAndRolloutStrategy(kvName string, virtClient kubecli.KubevirtClient, updateStrategy *v1.KubeVirtWorkloadUpdateStrategy, rolloutStrategy *v1.VMRolloutStrategy) {