From 2be809eee50cf573b8f112402f206e041bb68a35 Mon Sep 17 00:00:00 2001
From: kubevirt-bot
Date: Wed, 1 Mar 2023 22:37:08 +0100
Subject: [PATCH 1/2] Merge pull request #9312 from awels/set_hotplug_cpu_mem_ratio_to_1

Set mem/cpu request and limits on hotplug attachment pod container to 1 to 1 ratio

Signed-off-by: Alexander Wels
---
 .../services/renderresources.go |  20 +-
 tests/storage/hotplug.go        | 204 ++++++++++++++++++
 2 files changed, 207 insertions(+), 17 deletions(-)

diff --git a/pkg/virt-controller/services/renderresources.go b/pkg/virt-controller/services/renderresources.go
index 883d1293fd2f..65d73402ab57 100644
--- a/pkg/virt-controller/services/renderresources.go
+++ b/pkg/virt-controller/services/renderresources.go
@@ -526,16 +526,9 @@ func initContainerMinimalRequests() k8sv1.ResourceList {
 }
 
 func hotplugContainerResourceRequirementsForVMI(vmi *v1.VirtualMachineInstance) k8sv1.ResourceRequirements {
-	if vmi.IsCPUDedicated() || vmi.WantsToHaveQOSGuaranteed() {
-		return k8sv1.ResourceRequirements{
-			Limits:   hotplugContainerMinimalLimits(),
-			Requests: hotplugContainerMinimalLimits(),
-		}
-	} else {
-		return k8sv1.ResourceRequirements{
-			Limits:   hotplugContainerMinimalLimits(),
-			Requests: hotplugContainerMinimalRequests(),
-		}
+	return k8sv1.ResourceRequirements{
+		Limits:   hotplugContainerMinimalLimits(),
+		Requests: hotplugContainerMinimalLimits(),
 	}
 }
 
@@ -545,10 +538,3 @@ func hotplugContainerMinimalLimits() k8sv1.ResourceList {
 		k8sv1.ResourceMemory: resource.MustParse("80M"),
 	}
 }
-
-func hotplugContainerMinimalRequests() k8sv1.ResourceList {
-	return k8sv1.ResourceList{
-		k8sv1.ResourceCPU:    resource.MustParse("10m"),
-		k8sv1.ResourceMemory: resource.MustParse("2M"),
-	}
-}
diff --git a/tests/storage/hotplug.go b/tests/storage/hotplug.go
index 48eefb6e1fbf..44c02cb1d61e 100644
--- a/tests/storage/hotplug.go
+++ b/tests/storage/hotplug.go
@@ -22,10 +22,13 @@ package storage
 import (
 	"context"
 	"fmt"
+	"math"
 	"path/filepath"
 	"strconv"
 	"time"
 
+	"kubevirt.io/kubevirt/tests/util"
+
 	"kubevirt.io/client-go/log"
 
 	expect "github.com/google/goexpect"
@@ -35,7 +38,9 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/pointer"
 
 	v1 "kubevirt.io/api/core/v1"
 	"kubevirt.io/client-go/kubecli"
@@ -1160,6 +1165,205 @@ var _ = SIGDescribe("Hotplug", func() {
 		})
 	})
 
+	Context("with limit range in namespace", func() {
+		var (
+			sc                         string
+			lr                         *corev1.LimitRange
+			orgCdiResourceRequirements *corev1.ResourceRequirements
+		)
+
+		createVMWithRatio := func(memRatio, cpuRatio float64) *v1.VirtualMachine {
+			vm := tests.NewRandomVMWithDataVolumeWithRegistryImport(
+				cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros),
+				testsuite.GetTestNamespace(nil), sc, corev1.ReadWriteOnce)
+
+			memLimit := int64(1024 * 1024 * 128) //128Mi
+			memRequest := int64(math.Ceil(float64(memLimit) / memRatio))
+			memRequestQuantity := resource.NewScaledQuantity(memRequest, 0)
+			memLimitQuantity := resource.NewScaledQuantity(memLimit, 0)
+			cpuLimit := int64(1)
+			cpuRequest := int64(math.Ceil(float64(cpuLimit) / cpuRatio))
+			cpuRequestQuantity := resource.NewScaledQuantity(cpuRequest, 0)
+			cpuLimitQuantity := resource.NewScaledQuantity(cpuLimit, 0)
+			vm.Spec.Template.Spec.Domain.Resources.Requests[corev1.ResourceMemory] = *memRequestQuantity
+			vm.Spec.Template.Spec.Domain.Resources.Requests[corev1.ResourceCPU] = *cpuRequestQuantity
+			vm.Spec.Template.Spec.Domain.Resources.Limits = corev1.ResourceList{}
+			vm.Spec.Template.Spec.Domain.Resources.Limits[corev1.ResourceMemory] = *memLimitQuantity
+			vm.Spec.Template.Spec.Domain.Resources.Limits[corev1.ResourceCPU] = *cpuLimitQuantity
+			vm.Spec.Running = pointer.BoolPtr(true)
+			vm, err := virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Create(context.Background(), vm)
+			Expect(err).ToNot(HaveOccurred())
+			Eventually(func() bool {
+				vm, err = virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+				Expect(err).ToNot(HaveOccurred())
+				return vm.Status.Ready
+			}, 300*time.Second, 1*time.Second).Should(BeTrue())
+			return vm
+		}
+
+		updateCDIResourceRequirements := func(requirements *corev1.ResourceRequirements) {
+			if !libstorage.HasCDI() {
+				Skip("Test requires CDI CR to be available")
+			}
+			orgCdiConfig, err := virtClient.CdiClient().CdiV1beta1().CDIConfigs().Get(context.Background(), "config", metav1.GetOptions{})
+			Expect(err).ToNot(HaveOccurred())
+
+			cdi := libstorage.GetCDI(virtClient)
+			orgCdiResourceRequirements = cdi.Spec.Config.PodResourceRequirements
+			newCdi := cdi.DeepCopy()
+			newCdi.Spec.Config.PodResourceRequirements = requirements
+			_, err = virtClient.CdiClient().CdiV1beta1().CDIs().Update(context.Background(), newCdi, metav1.UpdateOptions{})
+			Expect(err).ToNot(HaveOccurred())
+			Eventually(func() bool {
+				cdiConfig, _ := virtClient.CdiClient().CdiV1beta1().CDIConfigs().Get(context.Background(), "config", metav1.GetOptions{})
+				if cdiConfig == nil {
+					return false
+				}
+				return cdiConfig.Generation > orgCdiConfig.Generation
+			}, 30*time.Second, 1*time.Second).Should(BeTrue())
+		}
+
+		updateCDIToRatio := func(memRatio, cpuRatio float64) {
+			memLimitQuantity := resource.MustParse("600M")
+			memLimit := memLimitQuantity.Value()
+			memRequest := int64(math.Ceil(float64(memLimit) / memRatio))
+			memRequestQuantity := resource.NewScaledQuantity(memRequest, 0)
+			cpuLimitQuantity := resource.MustParse("750m")
+			cpuLimit := cpuLimitQuantity.AsDec().UnscaledBig().Int64()
+			GinkgoWriter.Printf("cpu limit %d\n", cpuLimit)
+			cpuRequest := int64(math.Ceil(float64(cpuLimit) / cpuRatio))
+			cpuRequestQuantity := resource.NewScaledQuantity(cpuRequest, resource.Milli)
+			GinkgoWriter.Printf("cpu %v, memory %v\n", cpuRequest, memRequest)
+			updateCDIResourceRequirements(&corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{
+					corev1.ResourceCPU:    *cpuRequestQuantity,
+					corev1.ResourceMemory: *memRequestQuantity,
+				},
+				Limits: corev1.ResourceList{
+					corev1.ResourceCPU:    cpuLimitQuantity,
+					corev1.ResourceMemory: memLimitQuantity,
+				},
+			})
+		}
+
+		createLimitRangeInNamespace := func(namespace string, memRatio, cpuRatio float64) {
+			lr = &corev1.LimitRange{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: namespace,
+					Name:      fmt.Sprintf("%s-lr", namespace),
+				},
+				Spec: corev1.LimitRangeSpec{
+					Limits: []corev1.LimitRangeItem{
+						{
+							Type: corev1.LimitTypeContainer,
+							MaxLimitRequestRatio: corev1.ResourceList{
+								corev1.ResourceMemory: resource.MustParse(fmt.Sprintf("%f", memRatio)),
+								corev1.ResourceCPU:    resource.MustParse(fmt.Sprintf("%f", cpuRatio)),
+							},
+							Max: corev1.ResourceList{
+								corev1.ResourceMemory: resource.MustParse("1Gi"),
+								corev1.ResourceCPU:    resource.MustParse("1"),
+							},
+							Min: corev1.ResourceList{
+								corev1.ResourceMemory: resource.MustParse("24Mi"),
+								corev1.ResourceCPU:    resource.MustParse("1m"),
+							},
+						},
+					},
+				},
+			}
+			lr, err = virtClient.CoreV1().LimitRanges(namespace).Create(context.Background(), lr, metav1.CreateOptions{})
+			Expect(err).ToNot(HaveOccurred())
+			By("Ensuring LimitRange exists")
+			Eventually(func() error {
+				lr, err = virtClient.CoreV1().LimitRanges(namespace).Get(context.Background(), lr.Name, metav1.GetOptions{})
+				return err
+			}, 30*time.Second, 1*time.Second).Should(BeNil())
+		}
+
+		BeforeEach(func() {
+			exists := false
+			sc, exists = libstorage.GetRWXBlockStorageClass()
+
+			if !exists {
+				Skip("Skip test when RWXBlock storage class is not present")
+			}
+		})
+
+		AfterEach(func() {
+			if lr != nil {
+				err = virtClient.CoreV1().LimitRanges(namespace).Delete(context.Background(), lr.Name, metav1.DeleteOptions{})
+				if !errors.IsNotFound(err) {
+					Expect(err).ToNot(HaveOccurred())
+				}
+				lr = nil
+			}
+			updateCDIResourceRequirements(orgCdiResourceRequirements)
+			orgCdiResourceRequirements = nil
+		})
+
+		// Needs to be serial since I am putting limit range on namespace
+		DescribeTable("hotplug volume should have mem ratio same as VMI with limit range applied", func(memRatio, cpuRatio float64) {
+			updateCDIToRatio(memRatio, cpuRatio)
+			createLimitRangeInNamespace(util.NamespaceTestDefault, memRatio, cpuRatio)
+			vm := createVMWithRatio(memRatio, cpuRatio)
+			dv := createDataVolumeAndWaitForImport(sc, corev1.PersistentVolumeBlock)
+
+			vmi, err := virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+			Expect(err).ToNot(HaveOccurred())
+			libwait.WaitForSuccessfulVMIStartWithTimeout(vmi, 240)
+
+			By(addingVolumeRunningVM)
+			addDVVolumeVM(vm.Name, vm.Namespace, "testvolume", dv.Name, v1.DiskBusSCSI, false, "")
+			By(verifyingVolumeDiskInVM)
+			verifyVolumeAndDiskVMAdded(virtClient, vm, "testvolume")
+			vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+			Expect(err).ToNot(HaveOccurred())
+			verifyVolumeAndDiskVMIAdded(virtClient, vmi, "testvolume")
+			verifyVolumeStatus(vmi, v1.VolumeReady, "", "testvolume")
+			verifySingleAttachmentPod(vmi)
+			By("Verifying request/limit ratio on attachment pod")
+			podList, err := virtClient.CoreV1().Pods(vmi.Namespace).List(context.Background(), metav1.ListOptions{})
+			Expect(err).ToNot(HaveOccurred())
+			var virtlauncherPod, attachmentPod corev1.Pod
+			By("Finding virt-launcher pod")
+			for _, pod := range podList.Items {
+				for _, ownerRef := range pod.GetOwnerReferences() {
+					if ownerRef.UID == vmi.GetUID() {
+						virtlauncherPod = pod
+						break
+					}
+				}
+			}
+			// Attachment pod is owned by virt-launcher pod
+			for _, pod := range podList.Items {
+				for _, ownerRef := range pod.GetOwnerReferences() {
+					if ownerRef.UID == virtlauncherPod.GetUID() {
+						attachmentPod = pod
+						break
+					}
+				}
+			}
+			Expect(attachmentPod.Name).To(ContainSubstring("hp-volume-"))
+			memLimit := attachmentPod.Spec.Containers[0].Resources.Limits.Memory().Value()
+			memRequest := attachmentPod.Spec.Containers[0].Resources.Requests.Memory().Value()
+			Expect(memRequest).To(Equal(memLimit))
+			cpuLimit := attachmentPod.Spec.Containers[0].Resources.Limits.Cpu().Value()
+			cpuRequest := attachmentPod.Spec.Containers[0].Resources.Requests.Cpu().Value()
+			Expect(cpuRequest).To(Equal(cpuLimit))
+
+			By("Remove volume from a running VM")
+			removeVolumeVM(vm.Name, vm.Namespace, "testvolume", false)
+			_, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+			Expect(err).ToNot(HaveOccurred())
+		},
+			Entry("[Serial]1 to 1 cpu and mem ratio", Serial, float64(1), float64(1)),
+			Entry("[Serial]1 to 1 mem ratio, 4 to 1 cpu ratio", Serial, float64(1), float64(4)),
+			Entry("[Serial]2 to 1 mem ratio, 4 to 1 cpu ratio", Serial, float64(2), float64(4)),
+			Entry("[Serial]2.25 to 1 mem ratio, 5.75 to 1 cpu ratio", Serial, float64(2.25), float64(5.75)),
+		)
+	})
+
 	Context("hostpath", func() {
 		var (
 			vm *v1.VirtualMachine

From 6d2184051b4485dc7bb36e07d97929f3d2be1c01 Mon Sep 17 00:00:00 2001
From: Alexander Wels
Date: Thu, 2 Mar 2023 09:32:12 -0600
Subject: [PATCH 2/2] Fix test for backport

Signed-off-by: Alexander Wels
---
 tests/storage/hotplug.go | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/tests/storage/hotplug.go b/tests/storage/hotplug.go
index 44c02cb1d61e..af1ffab765a8 100644
--- a/tests/storage/hotplug.go
+++ b/tests/storage/hotplug.go
@@ -27,8 +27,6 @@ import (
 	"strconv"
 	"time"
 
-	"kubevirt.io/kubevirt/tests/util"
-
 	"kubevirt.io/client-go/log"
 
 	expect "github.com/google/goexpect"
@@ -1175,7 +1173,7 @@ var _ = SIGDescribe("Hotplug", func() {
 		createVMWithRatio := func(memRatio, cpuRatio float64) *v1.VirtualMachine {
 			vm := tests.NewRandomVMWithDataVolumeWithRegistryImport(
 				cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros),
-				testsuite.GetTestNamespace(nil), sc, corev1.ReadWriteOnce)
+				util.NamespaceTestDefault, sc, corev1.ReadWriteOnce)
 
 			memLimit := int64(1024 * 1024 * 128) //128Mi
 			memRequest := int64(math.Ceil(float64(memLimit) / memRatio))
@@ -1191,10 +1189,10 @@ var _ = SIGDescribe("Hotplug", func() {
 			vm.Spec.Template.Spec.Domain.Resources.Limits[corev1.ResourceMemory] = *memLimitQuantity
 			vm.Spec.Template.Spec.Domain.Resources.Limits[corev1.ResourceCPU] = *cpuLimitQuantity
 			vm.Spec.Running = pointer.BoolPtr(true)
-			vm, err := virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Create(context.Background(), vm)
+			vm, err := virtClient.VirtualMachine(util.NamespaceTestDefault).Create(vm)
 			Expect(err).ToNot(HaveOccurred())
 			Eventually(func() bool {
-				vm, err = virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+				vm, err = virtClient.VirtualMachine(util.NamespaceTestDefault).Get(vm.Name, &metav1.GetOptions{})
 				Expect(err).ToNot(HaveOccurred())
 				return vm.Status.Ready
 			}, 300*time.Second, 1*time.Second).Should(BeTrue())
@@ -1309,15 +1307,15 @@ var _ = SIGDescribe("Hotplug", func() {
 			vm := createVMWithRatio(memRatio, cpuRatio)
 			dv := createDataVolumeAndWaitForImport(sc, corev1.PersistentVolumeBlock)
 
-			vmi, err := virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+			vmi, err := virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &metav1.GetOptions{})
 			Expect(err).ToNot(HaveOccurred())
-			libwait.WaitForSuccessfulVMIStartWithTimeout(vmi, 240)
+			tests.WaitForSuccessfulVMIStartWithTimeout(vmi, 240)
 
 			By(addingVolumeRunningVM)
 			addDVVolumeVM(vm.Name, vm.Namespace, "testvolume", dv.Name, v1.DiskBusSCSI, false, "")
 			By(verifyingVolumeDiskInVM)
 			verifyVolumeAndDiskVMAdded(virtClient, vm, "testvolume")
-			vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+			vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &metav1.GetOptions{})
 			Expect(err).ToNot(HaveOccurred())
 			verifyVolumeAndDiskVMIAdded(virtClient, vmi, "testvolume")
 			verifyVolumeStatus(vmi, v1.VolumeReady, "", "testvolume")
@@ -1354,7 +1352,7 @@ var _ = SIGDescribe("Hotplug", func() {
 
 			By("Remove volume from a running VM")
 			removeVolumeVM(vm.Name, vm.Namespace, "testvolume", false)
-			_, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+			_, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &metav1.GetOptions{})
 			Expect(err).ToNot(HaveOccurred())
 		},
 			Entry("[Serial]1 to 1 cpu and mem ratio", Serial, float64(1), float64(1)),