Merge pull request #6642 from ShellyKa13/include-hot-plug-in-vm-snapshot
Include hotplugged disks in online VM snapshot
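This change allows online VM snapshots to include hotplugged disks. The snapshot source no longer refuses to lock a running VM whose VMI volumes differ from its VM revision (the validateVolumes check in pkg/virt-controller/watch/snapshot/source.go is removed), and Spec() now builds the online snapshot content from the VM revision overlaid with the running VMI's spec, taking volumes and disks from the current VM template so hotplugged disks are captured. The snapshot controller unit tests are updated to match, and the hotplug e2e tests switch from file-local verifyVolumeAndDiskVMAdded/verifyVolumeAndDiskVMIAdded helpers to shared tests.VerifyVolumeAndDiskVMAdded/tests.VerifyVolumeAndDiskVMIAdded helpers that take the client as an argument.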
kubevirt-bot committed Oct 27, 2021
2 parents fcb238b + ed4cb12 commit ed80fa5
Showing 6 changed files with 252 additions and 294 deletions.
49 changes: 10 additions & 39 deletions pkg/virt-controller/watch/snapshot/snapshot_test.go
@@ -117,6 +117,7 @@ var _ = Describe("Snapshot controlleer", func() {
Namespace: vm.Namespace,
Name: vm.Name,
},
Spec: vm.Spec.Template.Spec,
}
}

@@ -810,42 +811,6 @@ var _ = Describe("Snapshot controlleer", func() {
controller.processVMSnapshotWorkItem()
})

It("should not lock source if online and have hotplug disks", func() {
vmSnapshot := createVMSnapshotInProgress()
vm := createVM()
vm.Spec.Running = &t
vmSource.Add(vm)
vmRevision := createVMRevision(vm)
crSource.Add(vmRevision)
vmi := createVMI(vm)
vmi.Status.VirtualMachineRevisionName = vmRevisionName
vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
Name: "disk2",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "test-pvc2",
}},
},
})
vmiSource.Add(vmi)

updatedSnapshot := vmSnapshot.DeepCopy()
updatedSnapshot.Status.Phase = snapshotv1.InProgress
updatedSnapshot.ResourceVersion = "1"
updatedSnapshot.Status.Conditions = []snapshotv1.Condition{
newProgressingCondition(corev1.ConditionFalse, "Source not locked"),
newReadyCondition(corev1.ConditionFalse, "Not ready"),
}
updatedSnapshot.Status.Indications = []snapshotv1.Indication{
snapshotv1.VMSnapshotOnlineSnapshotIndication,
snapshotv1.VMSnapshotNoGuestAgentIndication,
}
expectVMSnapshotUpdate(vmSnapshotClient, updatedSnapshot)

addVirtualMachineSnapshot(vmSnapshot)
controller.processVMSnapshotWorkItem()
})

It("should lock source if pods using PVCs if VM is running", func() {
vmSnapshot := createVMSnapshotInProgress()
vm := createVM()
@@ -939,6 +904,9 @@ var _ = Describe("Snapshot controlleer", func() {
vmSnapshot := createVMSnapshotInProgress()
vm := createLockedVM()
vm.Spec.Running = &t
vm.Spec.Template.Spec.Domain.Resources.Requests = corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("32Mi"),
}

vmRevision := createVMRevision(vm)
crSource.Add(vmRevision)
@@ -947,9 +915,6 @@ var _ = Describe("Snapshot controlleer", func() {
vmiSource.Add(vmi)

vm.ObjectMeta.Annotations = map[string]string{}
// the content source will be equal to the vm revision data
expectedContent := createVirtualMachineSnapshotContent(vmSnapshot, vm)
vm.ObjectMeta.Generation = 2
vm.Spec.Template.Spec.Volumes = append(vm.Spec.Template.Spec.Volumes, v1.Volume{
Name: "disk2",
VolumeSource: v1.VolumeSource{
@@ -958,6 +923,12 @@ var _ = Describe("Snapshot controlleer", func() {
}},
},
})
// the content source will have a combination of the vm revision, the vmi and the vm volumes
expectedContent := createVirtualMachineSnapshotContent(vmSnapshot, vm)
vm.ObjectMeta.Generation = 2
vm.Spec.Template.Spec.Domain.Resources.Requests = corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("64Mi"),
}
vmSource.Add(vm)
storageClass := createStorageClass()
storageClassSource.Add(storageClass)
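In the snapshot controller tests, the case asserting that an online VM with hotplugged disks is not locked for snapshotting is dropped, since that restriction no longer exists. The online snapshot content test now computes the expected content after a hotplugged volume is appended to the VM template spec, with later spec changes (generation bump, memory increase) applied afterwards, reflecting that the content is built from the VM revision, the VMI, and the VM's current volumes rather than from the live VM spec alone.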
38 changes: 10 additions & 28 deletions pkg/virt-controller/watch/snapshot/source.go
@@ -22,7 +22,6 @@ package snapshot
import (
"encoding/json"
"fmt"
"reflect"
"time"

"k8s.io/apimachinery/pkg/types"
@@ -92,16 +91,6 @@ func (s *vmSnapshotSource) Lock() (bool, error) {
log.Log.V(3).Infof("Vm is offline but %d pods using PVCs %+v", len(pods), pvcNames)
return false, nil
}
} else {
valid, err := s.validateVolumes()
if err != nil {
return false, err
}

if !valid {
log.Log.Infof("VMSnapshot is not supported with hotplug disks")
return false, nil
}
}

if s.vm.Status.SnapshotInProgress != nil && *s.vm.Status.SnapshotInProgress != s.snapshot.Name {
@@ -157,23 +146,6 @@ func (s *vmSnapshotSource) Unlock() (bool, error) {
return true, nil
}

func (s *vmSnapshotSource) validateVolumes() (bool, error) {
vmi, exists, err := s.controller.getVMI(s.vm)
if err != nil || !exists {
return false, err
}

vmRevision, err := s.getVMRevision()
if err != nil {
return false, err
}

vmiPVCs := getPVCsFromVolumes(vmi.Spec.Volumes)
vmRevisionPVCs := getPVCsFromVolumes(vmRevision.Spec.Template.Spec.Volumes)

return reflect.DeepEqual(vmiPVCs, vmRevisionPVCs), nil
}

func (s *vmSnapshotSource) getVMRevision() (*kubevirtv1.VirtualMachine, error) {
vmi, exists, err := s.controller.getVMI(s.vm)
if err != nil {
@@ -228,6 +200,16 @@ func (s *vmSnapshotSource) Spec() (snapshotv1.SourceSpec, error) {
annotations[k] = v
}
vmCpy.ObjectMeta.Annotations = annotations
vmi, exists, err := s.controller.getVMI(s.vm)
if err != nil {
return snapshotv1.SourceSpec{}, err
}
if !exists {
return snapshotv1.SourceSpec{}, fmt.Errorf("can't get online snapshot spec, vmi doesn't exist")
}
vmi.Spec.Volumes = s.vm.Spec.Template.Spec.Volumes
vmi.Spec.Domain.Devices.Disks = s.vm.Spec.Template.Spec.Domain.Devices.Disks
vmCpy.Spec.Template.Spec = vmi.Spec
} else {
vmCpy = s.vm.DeepCopy()
vmCpy.Status = kubevirtv1.VirtualMachineStatus{}
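For an online snapshot, Spec() now starts from the VM revision (so the captured spec matches what the VMI was started with), takes the running VMI's spec, and overlays the volumes and disks from the current VM template so that hotplugged disks end up in the snapshot content. A minimal sketch of that composition, extracted for readability — the helper name and its wiring are illustrative, not part of this diff, and the import path depends on the KubeVirt version in use:

package snapshot

import (
	// Adjust to the module layout of your KubeVirt version
	// (older trees use kubevirt.io/client-go/api/v1).
	kubevirtv1 "kubevirt.io/api/core/v1"
)

// onlineSnapshotVMCopy (hypothetical helper) returns a copy of the VM-revision
// object whose template spec is the running VMI's spec, with volumes and disks
// taken from the current VM template so hotplugged disks are included.
func onlineSnapshotVMCopy(vmRevision, vm *kubevirtv1.VirtualMachine, vmi *kubevirtv1.VirtualMachineInstance) *kubevirtv1.VirtualMachine {
	vmCpy := vmRevision.DeepCopy()
	vmCpy.Status = kubevirtv1.VirtualMachineStatus{}

	vmiSpec := vmi.Spec.DeepCopy()
	// Hotplugged volumes/disks live only in the current VM template,
	// not in the revision taken at start time.
	vmiSpec.Volumes = vm.Spec.Template.Spec.Volumes
	vmiSpec.Domain.Devices.Disks = vm.Spec.Template.Spec.Domain.Devices.Disks
	vmCpy.Spec.Template.Spec = *vmiSpec
	return vmCpy
}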
113 changes: 17 additions & 96 deletions tests/storage/hotplug.go
@@ -55,8 +55,6 @@ const (
virtCtlVolumeName = "--volume-name=%s"
verifyCannotAccessDisk = "ls: %s: No such file or directory"

waitDiskTemplateError = "waiting on new disk to appear in template"
waitVolumeTemplateError = "waiting on new volume to appear in template"
waitVolumeRequestProcessError = "waiting on all VolumeRequests to be processed"

expectReturn = "echo $?\n"
@@ -183,81 +181,6 @@ var _ = SIGDescribe("Hotplug", func() {
}, 3*time.Second, 1*time.Second).ShouldNot(HaveOccurred())
}

verifyVolumeAndDiskVMAdded := func(vm *v1.VirtualMachine, volumeNames ...string) {
nameMap := make(map[string]bool)
for _, volumeName := range volumeNames {
nameMap[volumeName] = true
}
log.Log.Infof("Checking %d volumes", len(volumeNames))
Eventually(func() error {
updatedVM, err := virtClient.VirtualMachine(vm.Namespace).Get(vm.Name, &metav1.GetOptions{})
if err != nil {
return err
}

if len(updatedVM.Status.VolumeRequests) > 0 {
return fmt.Errorf(waitVolumeRequestProcessError)
}

foundVolume := 0
foundDisk := 0

for _, volume := range updatedVM.Spec.Template.Spec.Volumes {
if _, ok := nameMap[volume.Name]; ok {
foundVolume++
}
}
for _, disk := range updatedVM.Spec.Template.Spec.Domain.Devices.Disks {
if _, ok := nameMap[disk.Name]; ok {
foundDisk++
}
}

if foundDisk != len(volumeNames) {
return fmt.Errorf(waitDiskTemplateError)
}
if foundVolume != len(volumeNames) {
return fmt.Errorf(waitVolumeTemplateError)
}

return nil
}, 90*time.Second, 1*time.Second).ShouldNot(HaveOccurred())
}

verifyVolumeAndDiskVMIAdded := func(vmi *v1.VirtualMachineInstance, volumeNames ...string) {
nameMap := make(map[string]bool)
for _, volumeName := range volumeNames {
nameMap[volumeName] = true
}
Eventually(func() error {
updatedVMI, err := virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{})
if err != nil {
return err
}

foundVolume := 0
foundDisk := 0

for _, volume := range updatedVMI.Spec.Volumes {
if _, ok := nameMap[volume.Name]; ok {
foundVolume++
}
}
for _, disk := range updatedVMI.Spec.Domain.Devices.Disks {
if _, ok := nameMap[disk.Name]; ok {
foundDisk++
}
}

if foundDisk != len(volumeNames) {
return fmt.Errorf(waitDiskTemplateError)
}
if foundVolume != len(volumeNames) {
return fmt.Errorf(waitVolumeTemplateError)
}

return nil
}, 90*time.Second, 1*time.Second).ShouldNot(HaveOccurred())
}

verifyVolumeAndDiskVMRemoved := func(vm *v1.VirtualMachine, volumeNames ...string) {
nameMap := make(map[string]bool)
for _, volumeName := range volumeNames {
@@ -518,7 +439,7 @@ var _ = SIGDescribe("Hotplug", func() {
addVolumeFunc(vm.Name, vm.Namespace, testNewVolume1, "madeup", "scsi")
addVolumeFunc(vm.Name, vm.Namespace, testNewVolume2, "madeup", "scsi")
By("Verifying the volumes have been added to the template spec")
verifyVolumeAndDiskVMAdded(vm, testNewVolume1, testNewVolume2)
tests.VerifyVolumeAndDiskVMAdded(virtClient, vm, testNewVolume1, testNewVolume2)
By("Removing new volumes from VM")
removeVolumeFunc(vm.Name, vm.Namespace, testNewVolume1)
removeVolumeFunc(vm.Name, vm.Namespace, testNewVolume2)
@@ -575,7 +496,7 @@ var _ = SIGDescribe("Hotplug", func() {

vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
verifyVolumeAndDiskVMIAdded(vmi, dvNames...)
tests.VerifyVolumeAndDiskVMIAdded(virtClient, vmi, dvNames...)
verifyVolumeStatus(vmi, v1.VolumeReady, dvNames...)
getVmiConsoleAndLogin(vmi)
verifyHotplugAttachedAndUseable(vmi, dvNames)
@@ -650,11 +571,11 @@ var _ = SIGDescribe("Hotplug", func() {
addVolumeFunc(vm.Name, vm.Namespace, "testvolume", dv.Name, "scsi")
By("Verifying the volume and disk are in the VM and VMI")
if !vmiOnly {
verifyVolumeAndDiskVMAdded(vm, "testvolume")
tests.VerifyVolumeAndDiskVMAdded(virtClient, vm, "testvolume")
}
vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
verifyVolumeAndDiskVMIAdded(vmi, "testvolume")
tests.VerifyVolumeAndDiskVMIAdded(virtClient, vmi, "testvolume")
verifyVolumeStatus(vmi, v1.VolumeReady, "testvolume")
getVmiConsoleAndLogin(vmi)
targets := verifyHotplugAttachedAndUseable(vmi, []string{"testvolume"})
@@ -691,11 +612,11 @@ var _ = SIGDescribe("Hotplug", func() {
}
By("Verifying the volume and disk are in the VM and VMI")
if !vmiOnly {
verifyVolumeAndDiskVMAdded(vm, testVolumes...)
tests.VerifyVolumeAndDiskVMAdded(virtClient, vm, testVolumes...)
}
vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
verifyVolumeAndDiskVMIAdded(vmi, testVolumes...)
tests.VerifyVolumeAndDiskVMIAdded(virtClient, vmi, testVolumes...)
verifyVolumeStatus(vmi, v1.VolumeReady, testVolumes...)
targets := verifyHotplugAttachedAndUseable(vmi, testVolumes)
verifySingleAttachmentPod(vmi)
@@ -736,11 +657,11 @@ var _ = SIGDescribe("Hotplug", func() {

By("Verifying the volume and disk are in the VM and VMI")
if !vmiOnly {
verifyVolumeAndDiskVMAdded(vm, testVolumes[:len(testVolumes)-1]...)
tests.VerifyVolumeAndDiskVMAdded(virtClient, vm, testVolumes[:len(testVolumes)-1]...)
}
vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
verifyVolumeAndDiskVMIAdded(vmi, testVolumes[:len(testVolumes)-1]...)
tests.VerifyVolumeAndDiskVMIAdded(virtClient, vmi, testVolumes[:len(testVolumes)-1]...)
waitForAttachmentPodToRun(vmi)
verifyVolumeStatus(vmi, v1.VolumeReady, testVolumes[:len(testVolumes)-1]...)
verifySingleAttachmentPod(vmi)
@@ -812,10 +733,10 @@ var _ = SIGDescribe("Hotplug", func() {
By("Adding volume to running VM")
addDVVolumeVM(vm.Name, vm.Namespace, "testvolume", dvBlock.Name, "scsi")
By("Verifying the volume and disk are in the VM and VMI")
verifyVolumeAndDiskVMAdded(vm, "testvolume")
tests.VerifyVolumeAndDiskVMAdded(virtClient, vm, "testvolume")
vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
verifyVolumeAndDiskVMIAdded(vmi, "testvolume")
tests.VerifyVolumeAndDiskVMIAdded(virtClient, vmi, "testvolume")
verifyVolumeStatus(vmi, v1.VolumeReady, "testvolume")
verifySingleAttachmentPod(vmi)

@@ -833,7 +754,7 @@ var _ = SIGDescribe("Hotplug", func() {
Expect(err).ToNot(HaveOccurred())

By("Verifying that the hotplugged volume is hotpluggable after a restart")
verifyVolumeAndDiskVMIAdded(vmi, "testvolume")
tests.VerifyVolumeAndDiskVMIAdded(virtClient, vmi, "testvolume")
verifyVolumeStatus(vmi, v1.VolumeReady, "testvolume")

By("Verifying the hotplug device is auto-mounted during booting")
@@ -877,7 +798,7 @@ var _ = SIGDescribe("Hotplug", func() {
By("Adding volume to running VM")
addDVVolumeVM(vm.Name, vm.Namespace, "block", dvBlock.Name, "scsi")
addDVVolumeVM(vm.Name, vm.Namespace, "fs", dvFileSystem.Name, "scsi")
verifyVolumeAndDiskVMIAdded(vmi, "block", "fs")
tests.VerifyVolumeAndDiskVMIAdded(virtClient, vmi, "block", "fs")

verifyVolumeStatus(vmi, v1.VolumeReady, "block", "fs")
targets := getTargetsFromVolumeStatus(vmi, "block", "fs")
@@ -987,7 +908,7 @@ var _ = SIGDescribe("Hotplug", func() {
By("Verifying the volume and disk are in the VMI")
vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
verifyVolumeAndDiskVMIAdded(vmi, volumeName)
tests.VerifyVolumeAndDiskVMIAdded(virtClient, vmi, volumeName)
verifyVolumeStatus(vmi, v1.VolumeReady, volumeName)

By("Verifying the VMI is still migrateable")
@@ -1030,7 +951,7 @@ var _ = SIGDescribe("Hotplug", func() {
addVolumeFunc(vmi.Name, vmi.Namespace, volumeName, dv.Name, "scsi")
vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
verifyVolumeAndDiskVMIAdded(vmi, volumeName)
tests.VerifyVolumeAndDiskVMIAdded(virtClient, vmi, volumeName)
verifyVolumeStatus(vmi, v1.VolumeReady, volumeName)
})
})
@@ -1074,7 +995,7 @@ var _ = SIGDescribe("Hotplug", func() {
By("Verifying the volume and disk are in the VM and VMI")
vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
verifyVolumeAndDiskVMIAdded(vmi, "testvolume")
tests.VerifyVolumeAndDiskVMIAdded(virtClient, vmi, "testvolume")
verifyVolumeStatus(vmi, v1.VolumeReady, "testvolume")

getVmiConsoleAndLogin(vmi)
@@ -1119,7 +1040,7 @@ var _ = SIGDescribe("Hotplug", func() {
By("Verifying the volume and disk are in the VM and VMI")
vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
verifyVolumeAndDiskVMIAdded(vmi, "testvolume")
tests.VerifyVolumeAndDiskVMIAdded(virtClient, vmi, "testvolume")
verifyVolumeStatus(vmi, v1.VolumeReady, "testvolume")

getVmiConsoleAndLogin(vmi)
@@ -1166,7 +1087,7 @@ var _ = SIGDescribe("Hotplug", func() {
By("Verifying the volume and disk are in the VM and VMI")
vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
verifyVolumeAndDiskVMIAdded(vmi, "testvolume")
tests.VerifyVolumeAndDiskVMIAdded(virtClient, vmi, "testvolume")
verifyVolumeStatus(vmi, v1.VolumeReady, "testvolume")

getVmiConsoleAndLogin(vmi)
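The file-local verifyVolumeAndDiskVMAdded and verifyVolumeAndDiskVMIAdded helpers are removed in favour of shared tests.VerifyVolumeAndDiskVMAdded and tests.VerifyVolumeAndDiskVMIAdded, which take the client as an explicit argument so other suites can reuse them. A sketch of the VMI variant, assuming the shared helper mirrors the removed local implementation (import paths depend on the KubeVirt version):

package tests

import (
	"fmt"
	"time"

	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "kubevirt.io/api/core/v1"
	"kubevirt.io/client-go/kubecli"
)

// VerifyVolumeAndDiskVMIAdded waits until every named volume and its matching
// disk appear in the VMI spec.
func VerifyVolumeAndDiskVMIAdded(virtClient kubecli.KubevirtClient, vmi *v1.VirtualMachineInstance, volumeNames ...string) {
	nameMap := make(map[string]bool)
	for _, volumeName := range volumeNames {
		nameMap[volumeName] = true
	}
	Eventually(func() error {
		updatedVMI, err := virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{})
		if err != nil {
			return err
		}
		foundVolume, foundDisk := 0, 0
		for _, volume := range updatedVMI.Spec.Volumes {
			if _, ok := nameMap[volume.Name]; ok {
				foundVolume++
			}
		}
		for _, disk := range updatedVMI.Spec.Domain.Devices.Disks {
			if _, ok := nameMap[disk.Name]; ok {
				foundDisk++
			}
		}
		if foundDisk != len(volumeNames) {
			return fmt.Errorf("waiting on new disk to appear in template")
		}
		if foundVolume != len(volumeNames) {
			return fmt.Errorf("waiting on new volume to appear in template")
		}
		return nil
	}, 90*time.Second, 1*time.Second).ShouldNot(HaveOccurred())
}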
