[release-0.58] Fix bug in restore-controller when creating a restore PVC with both dataSource and dataSourceRef #8697

Merged
7 changes: 6 additions & 1 deletion pkg/storage/snapshot/restore.go
@@ -878,11 +878,16 @@ func CreateRestorePVCDef(restorePVCName string, volumeSnapshot *vsv1.VolumeSnaps
 	}
 
 	apiGroup := vsv1.GroupName
-	pvc.Spec.DataSource = &corev1.TypedLocalObjectReference{
+	dataSource := corev1.TypedLocalObjectReference{
 		APIGroup: &apiGroup,
 		Kind:     "VolumeSnapshot",
 		Name:     *volumeBackup.VolumeSnapshotName,
 	}
+
+	// We need to overwrite both dataSource and dataSourceRef to avoid incompatibilities between the two
+	pvc.Spec.DataSource = &dataSource
+	pvc.Spec.DataSourceRef = &dataSource
+
 	pvc.Spec.VolumeName = ""
 	return pvc
 }
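
For background on why both fields are set: on clusters where the AnyVolumeDataSource feature is enabled, the API server keeps a PVC's dataSource and dataSourceRef mirrored when only one of them is set, and rejects a PVC in which both are set but disagree. Since CreateRestorePVCDef builds the restore PVC by copying the source PVC's spec, a source PVC that already carried a dataSourceRef kept that stale reference when only dataSource was overwritten. Below is a minimal sketch of the failure mode, assuming (as the diff above does) a k8s.io/api version in which DataSourceRef is still a *corev1.TypedLocalObjectReference; the snapshot names are purely illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	apiGroup := "snapshot.storage.k8s.io"
	oldSnap := corev1.TypedLocalObjectReference{APIGroup: &apiGroup, Kind: "VolumeSnapshot", Name: "source-snap"}

	// Spec as copied from the source PVC: the API server had mirrored
	// dataSource into dataSourceRef when the source PVC was created.
	spec := corev1.PersistentVolumeClaimSpec{
		DataSource:    &oldSnap,
		DataSourceRef: oldSnap.DeepCopy(),
	}

	// Pre-fix behavior: only dataSource was overwritten with the restore
	// snapshot, leaving dataSourceRef pointing at the old snapshot.
	newSnap := corev1.TypedLocalObjectReference{APIGroup: &apiGroup, Kind: "VolumeSnapshot", Name: "restore-snap"}
	spec.DataSource = &newSnap

	// The API server rejects a PVC whose two fields disagree.
	fmt.Println(spec.DataSource.Name == spec.DataSourceRef.Name) // prints "false"
}
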
81 changes: 80 additions & 1 deletion pkg/storage/snapshot/restore_test.go
@@ -32,7 +32,7 @@ import (
"kubevirt.io/kubevirt/pkg/util/status"
)

var _ = Describe("Restore controlleer", func() {
var _ = Describe("Restore controller", func() {
const (
testNamespace = "default"
uid = "uid"
@@ -175,6 +175,24 @@ var _ = Describe("Restore controlleer", func() {
 		}
 	}
 
+	createPVCsForVMWithDataSourceRef := func(vm *v1.VirtualMachine) []corev1.PersistentVolumeClaim {
+		var pvcs []corev1.PersistentVolumeClaim
+		for i, dv := range vm.Spec.DataVolumeTemplates {
+			pvc := corev1.PersistentVolumeClaim{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: vm.Namespace,
+					Name:      dv.Name,
+				},
+				Spec: *dv.Spec.PVC,
+			}
+			pvc.Spec.DataSourceRef = dv.Spec.PVC.DataSource
+			pvc.Spec.VolumeName = fmt.Sprintf("volume%d", i+1)
+			pvc.ResourceVersion = "1"
+			pvcs = append(pvcs, pvc)
+		}
+		return pvcs
+	}
+
 	Context("One valid Restore controller given", func() {
 
 		var ctrl *gomock.Controller
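
Note that createPVCsForVMWithDataSourceRef mirrors each DataVolume template's dataSource into dataSourceRef itself: in a unit test there is no API server to do that mirroring, so the helper fabricates source PVCs shaped like ones created on a cluster with AnyVolumeDataSource enabled — exactly the shape that triggered the bug.
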
@@ -881,6 +899,43 @@ var _ = Describe("Restore controlleer", func() {

 			})
 		})
+
+		It("should create restore PVCs with populated dataSourceRef and dataSource", func() {
+			// Mock the restore environment from scratch, so we use source PVCs with dataSourceRef
+			vm := createSnapshotVM()
+			pvcs := createPVCsForVMWithDataSourceRef(vm)
+			s := createSnapshot()
+			sc := createVirtualMachineSnapshotContent(s, vm, pvcs)
+			storageClass := createStorageClass()
+			s.Status.VirtualMachineSnapshotContentName = &sc.Name
+			sc.Status = &snapshotv1.VirtualMachineSnapshotContentStatus{
+				CreationTime: timeFunc(),
+				ReadyToUse:   &t,
+			}
+			vmSnapshotSource.Add(s)
+			vmSnapshotContentSource.Add(sc)
+			storageClassSource.Add(storageClass)
+
+			// Actual test
+			r := createRestoreWithOwner()
+			vm = createModifiedVM()
+			r.Status = &snapshotv1.VirtualMachineRestoreStatus{
+				Complete: &f,
+				Conditions: []snapshotv1.Condition{
+					newProgressingCondition(corev1.ConditionTrue, "Creating new PVCs"),
+					newReadyCondition(corev1.ConditionFalse, "Waiting for new PVCs"),
+				},
+			}
+			vmSource.Add(vm)
+			addVolumeRestores(r)
+			pvcSize := resource.MustParse("2Gi")
+			vs := createVolumeSnapshot(r.Status.Restores[0].VolumeSnapshotName, pvcSize)
+			fakeVolumeSnapshotProvider.Add(vs)
+			expectUpdateVMRestoreInProgress(vm)
+			expectPVCCreateWithDataSourceRef(k8sClient, r, pvcSize)
+			addVirtualMachineRestore(r)
+			controller.processVMRestoreWorkItem()
+		})
 	})
 })
 
@@ -904,6 +959,30 @@ func expectPVCCreates(client *k8sfake.Clientset, vmRestore *snapshotv1.VirtualMa
 	})
 }
 
+func expectPVCCreateWithDataSourceRef(client *k8sfake.Clientset, vmRestore *snapshotv1.VirtualMachineRestore, expectedSize resource.Quantity) {
+	client.Fake.PrependReactor("create", "persistentvolumeclaims", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
+		create, ok := action.(testing.CreateAction)
+		Expect(ok).To(BeTrue())
+
+		createObj := create.GetObject().(*corev1.PersistentVolumeClaim)
+		found := false
+		for _, vr := range vmRestore.Status.Restores {
+			if vr.PersistentVolumeClaimName == createObj.Name {
+				Expect(createObj.Spec.Resources.Requests[corev1.ResourceStorage]).To(Equal(expectedSize))
+				found = true
+				break
+			}
+		}
+		Expect(found).To(BeTrue())
+
+		Expect(createObj.Spec.DataSource).ToNot(BeNil())
+		Expect(createObj.Spec.DataSourceRef).ToNot(BeNil())
+		Expect(createObj.Spec.DataSource).To(Equal(createObj.Spec.DataSourceRef))
+
+		return true, create.GetObject(), nil
+	})
+}
+
 func expectPVCUpdates(client *k8sfake.Clientset, vmRestore *snapshotv1.VirtualMachineRestore) {
 	client.Fake.PrependReactor("update", "persistentvolumeclaims", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
 		update, ok := action.(testing.UpdateAction)
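
A note on the assertion style: PrependReactor on the fake clientset runs before the default object-tracker reactors, and returning true (handled) short-circuits the chain, so expectPVCCreateWithDataSourceRef can inspect the exact PersistentVolumeClaim the controller attempted to create and assert that dataSource and dataSourceRef are both set and equal.
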
54 changes: 54 additions & 0 deletions tests/storage/restore.go
@@ -18,7 +18,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/utils/pointer"

clonev1alpha1 "kubevirt.io/api/clone/v1alpha1"
"kubevirt.io/api/core"
v1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1alpha1"
@@ -894,6 +896,32 @@ var _ = SIGDescribe("VirtualMachineRestore Tests", func() {
 		verifyOwnerRef(pvc, "cdi.kubevirt.io/v1beta1", "DataVolume", dv.Name, dv.UID)
 	}
 
+	cloneVM := func(sourceVMName, targetVMName string) {
+		By("Creating VM clone")
+		vmClone := kubecli.NewMinimalCloneWithNS("testclone", util.NamespaceTestDefault)
+		cloneSourceRef := &corev1.TypedLocalObjectReference{
+			APIGroup: pointer.String(groupName),
+			Kind:     "VirtualMachine",
+			Name:     sourceVMName,
+		}
+		cloneTargetRef := cloneSourceRef.DeepCopy()
+		cloneTargetRef.Name = targetVMName
+		vmClone.Spec.Source = cloneSourceRef
+		vmClone.Spec.Target = cloneTargetRef
+
+		By(fmt.Sprintf("Creating clone object %s", vmClone.Name))
+		vmClone, err = virtClient.VirtualMachineClone(vmClone.Namespace).Create(context.Background(), vmClone, metav1.CreateOptions{})
+		Expect(err).ShouldNot(HaveOccurred())
+
+		By(fmt.Sprintf("Waiting for the clone %s to finish", vmClone.Name))
+		Eventually(func() clonev1alpha1.VirtualMachineClonePhase {
+			vmClone, err = virtClient.VirtualMachineClone(vmClone.Namespace).Get(context.Background(), vmClone.Name, metav1.GetOptions{})
+			Expect(err).ShouldNot(HaveOccurred())
+
+			return vmClone.Status.Phase
+		}, 3*time.Minute, 3*time.Second).Should(Equal(clonev1alpha1.Succeeded), "clone should finish successfully")
+	}
+
 	It("[test_id:5259]should restore a vm multiple from the same snapshot", func() {
 		vm, vmi = createAndStartVM(tests.NewRandomVMWithDataVolumeAndUserDataInStorageClass(
 			cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros),
@@ -1406,6 +1434,32 @@ var _ = SIGDescribe("VirtualMachineRestore Tests", func() {
 		Expect(vmi.Spec.Domain.Resources.Requests[corev1.ResourceMemory]).To(Equal(initialMemory))
 	})
 
+	It("should restore an already cloned virtual machine", func() {
+		vm, vmi = createAndStartVM(tests.NewRandomVMWithDataVolumeWithRegistryImport(
+			cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskFedoraTestTooling),
+			util.NamespaceTestDefault,
+			snapshotStorageClass,
+			corev1.ReadWriteOnce))
+
+		targetVMName := vm.Name + "-clone"
+		cloneVM(vm.Name, targetVMName)
+
+		By(fmt.Sprintf("Getting the cloned VM %s", targetVMName))
+		targetVM, err := virtClient.VirtualMachine(vm.Namespace).Get(targetVMName, &metav1.GetOptions{})
+		Expect(err).ShouldNot(HaveOccurred())
+
+		By(creatingSnapshot)
+		snapshot = createSnapshot(targetVM)
+		newVM = tests.StopVirtualMachine(targetVM)
+
+		By("Restoring cloned VM")
+		restore = createRestoreDef(newVM.Name, snapshot.Name)
+		restore, err = virtClient.VirtualMachineRestore(targetVM.Namespace).Create(context.Background(), restore, metav1.CreateOptions{})
+		Expect(err).ToNot(HaveOccurred())
+		restore = waitRestoreComplete(restore, newVM.Name, &newVM.UID)
+		Expect(restore.Status.Restores).To(HaveLen(1))
+	})
+
 	DescribeTable("should restore vm with hot plug disks", func(restoreToNewVM bool) {
 		vm, vmi = createAndStartVM(tests.NewRandomVMWithDataVolumeWithRegistryImport(
 			cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskFedoraTestTooling),
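
As for why the new end-to-end case clones the VM first: cloning (typically satisfied by a CDI smart clone from a snapshot) presumably yields target PVCs whose dataSource/dataSourceRef pair is populated, so snapshotting and restoring the clone exercises the spec-copying path fixed in restore.go; before the fix, the restore PVC would be rejected for carrying a mismatched pair.
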