[release-0.53] Backport 'Fix vm restore in case of restore size bigger then PVC requested size' #7941

Merged: 2 commits merged into release-0.53 on Jun 23, 2022
1 change: 1 addition & 0 deletions pkg/virt-controller/watch/application.go
@@ -635,6 +635,7 @@ func (vca *VirtControllerApp) initRestoreController() {
DataVolumeInformer: vca.dataVolumeInformer,
PVCInformer: vca.persistentVolumeClaimInformer,
StorageClassInformer: vca.storageClassInformer,
VolumeSnapshotProvider: vca.snapshotController,
Recorder: recorder,
}
vca.restoreController.Init()
20 changes: 19 additions & 1 deletion pkg/virt-controller/watch/snapshot/restore.go
@@ -740,6 +740,24 @@ func (ctrl *VMRestoreController) createRestorePVC(
return fmt.Errorf("missing VolumeSnapshot name")
}

volumeSnapshot, err := ctrl.VolumeSnapshotProvider.GetVolumeSnapshot(vmRestore.Namespace, *volumeBackup.VolumeSnapshotName)
if err != nil {
return err
}

if volumeSnapshot == nil {
log.Log.Errorf("VolumeSnapshot %s is missing", *volumeBackup.VolumeSnapshotName)
return fmt.Errorf("missing VolumeSnapshot %s", *volumeBackup.VolumeSnapshotName)
}

if volumeSnapshot.Status != nil && volumeSnapshot.Status.RestoreSize != nil {
restorePVCSize, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
// Update restore pvc size to be the maximum between the source PVC and the restore size
if !ok || restorePVCSize.Cmp(*volumeSnapshot.Status.RestoreSize) < 0 {
pvc.Spec.Resources.Requests[corev1.ResourceStorage] = *volumeSnapshot.Status.RestoreSize
}
}

if pvc.Labels == nil {
pvc.Labels = make(map[string]string)
}
@@ -768,7 +786,7 @@ func (ctrl *VMRestoreController) createRestorePVC(

target.Own(pvc)

_, err = ctrl.Client.CoreV1().PersistentVolumeClaims(vmRestore.Namespace).Create(context.Background(), pvc, metav1.CreateOptions{})
if err != nil {
return err
}
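The heart of the fix is the Cmp-based comparison added to createRestorePVC above: the restore PVC requests whichever is larger, the source PVC request or the snapshot's reported restore size. A minimal standalone sketch of that comparison (illustrative only; the maxQuantity helper is not part of the PR, and the sample sizes are taken from the e2e test further down):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// maxQuantity mirrors the comparison in createRestorePVC: prefer the
// snapshot restore size whenever it exceeds the requested PVC size.
// (Hypothetical helper, not part of the PR.)
func maxQuantity(requested, restoreSize resource.Quantity) resource.Quantity {
	// Cmp returns a negative value when requested < restoreSize.
	if requested.Cmp(restoreSize) < 0 {
		return restoreSize
	}
	return requested
}

func main() {
	requested := resource.MustParse("1528Mi") // what the DataVolumeTemplate asked for
	restoreSize := resource.MustParse("2Gi")  // what the VolumeSnapshot reports after rounding
	result := maxQuantity(requested, restoreSize)
	fmt.Println(result.String()) // prints "2Gi"
}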
2 changes: 2 additions & 0 deletions pkg/virt-controller/watch/snapshot/restore_base.go
@@ -50,6 +50,8 @@ type VMRestoreController struct {
PVCInformer cache.SharedIndexInformer
StorageClassInformer cache.SharedIndexInformer

VolumeSnapshotProvider VolumeSnapshotProvider

Recorder record.EventRecorder

vmRestoreQueue workqueue.RateLimitingInterface
87 changes: 85 additions & 2 deletions pkg/virt-controller/watch/snapshot/restore_test.go
@@ -5,6 +5,7 @@ import (
"fmt"

"github.com/golang/mock/gomock"
vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
@@ -162,6 +163,17 @@ var _ = Describe("Restore controller", func() {
}
}

createVolumeSnapshot := func(name string, restoreSize resource.Quantity) *vsv1.VolumeSnapshot {
return &vsv1.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Status: &vsv1.VolumeSnapshotStatus{
RestoreSize: &restoreSize,
},
}
}

Context("One valid Restore controller given", func() {

var ctrl *gomock.Controller
Expand Down Expand Up @@ -196,6 +208,7 @@ var _ = Describe("Restore controlleer", func() {
var controller *VMRestoreController
var recorder *record.FakeRecorder
var mockVMRestoreQueue *testutils.MockWorkQueue
var fakeVolumeSnapshotProvider *MockVolumeSnapshotProvider

var kubevirtClient *kubevirtfake.Clientset
var k8sClient *k8sfake.Clientset
@@ -241,6 +254,10 @@
recorder = record.NewFakeRecorder(100)
recorder.IncludeObject = true

fakeVolumeSnapshotProvider = &MockVolumeSnapshotProvider{
volumeSnapshots: []*vsv1.VolumeSnapshot{},
}

controller = &VMRestoreController{
Client: virtClient,
VMRestoreInformer: vmRestoreInformer,
@@ -253,6 +270,7 @@
DataVolumeInformer: dataVolumeInformer,
Recorder: recorder,
vmStatusUpdater: status.NewVMStatusUpdater(virtClient),
VolumeSnapshotProvider: fakeVolumeSnapshotProvider,
}
controller.Init()

@@ -417,8 +435,54 @@ var _ = Describe("Restore controller", func() {
}
vmSource.Add(vm)
addVolumeRestores(r)
pvcSize := resource.MustParse("2Gi")
vs := createVolumeSnapshot(r.Status.Restores[0].VolumeSnapshotName, pvcSize)
fakeVolumeSnapshotProvider.Add(vs)
expectUpdateVMRestoreInProgress(vm)
expectPVCCreates(k8sClient, r, pvcSize)
addVirtualMachineRestore(r)
controller.processVMRestoreWorkItem()
})

It("should create restore PVC with volume snapshot size if bigger then PVC size", func() {
r := createRestoreWithOwner()
vm := createModifiedVM()
r.Status = &snapshotv1.VirtualMachineRestoreStatus{
Complete: &f,
Conditions: []snapshotv1.Condition{
newProgressingCondition(corev1.ConditionTrue, "Creating new PVCs"),
newReadyCondition(corev1.ConditionFalse, "Waiting for new PVCs"),
},
}
vmSource.Add(vm)
addVolumeRestores(r)
q := resource.MustParse("3Gi")
vs := createVolumeSnapshot(r.Status.Restores[0].VolumeSnapshotName, q)
fakeVolumeSnapshotProvider.Add(vs)
expectUpdateVMRestoreInProgress(vm)
expectPVCCreates(k8sClient, r, q)
addVirtualMachineRestore(r)
controller.processVMRestoreWorkItem()
})

It("should create restore PVC with pvc size if restore size is smaller", func() {
r := createRestoreWithOwner()
vm := createModifiedVM()
r.Status = &snapshotv1.VirtualMachineRestoreStatus{
Complete: &f,
Conditions: []snapshotv1.Condition{
newProgressingCondition(corev1.ConditionTrue, "Creating new PVCs"),
newReadyCondition(corev1.ConditionFalse, "Waiting for new PVCs"),
},
}
vmSource.Add(vm)
addVolumeRestores(r)
q := resource.MustParse("1Gi")
vs := createVolumeSnapshot(r.Status.Restores[0].VolumeSnapshotName, q)
fakeVolumeSnapshotProvider.Add(vs)
expectUpdateVMRestoreInProgress(vm)
pvcSize := resource.MustParse("2Gi")
expectPVCCreates(k8sClient, r, pvcSize)
addVirtualMachineRestore(r)
controller.processVMRestoreWorkItem()
})
@@ -784,7 +848,7 @@ var _ = Describe("Restore controller", func() {
})
})

func expectPVCCreates(client *k8sfake.Clientset, vmRestore *snapshotv1.VirtualMachineRestore) {
func expectPVCCreates(client *k8sfake.Clientset, vmRestore *snapshotv1.VirtualMachineRestore, expectedSize resource.Quantity) {
client.Fake.PrependReactor("create", "persistentvolumeclaims", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
create, ok := action.(testing.CreateAction)
Expect(ok).To(BeTrue())
@@ -793,6 +857,7 @@ func expectPVCCreates(client *k8sfake.Clientset, vmRestore *snapshotv1.VirtualMa
found := false
for _, vr := range vmRestore.Status.Restores {
if vr.PersistentVolumeClaimName == createObj.Name {
Expect(createObj.Spec.Resources.Requests[corev1.ResourceStorage]).To(Equal(expectedSize))
found = true
break
}
@@ -852,3 +917,21 @@ func expectDataVolumeDeletes(client *cdifake.Clientset, names []string) {
return true, nil, nil
})
}

// A mock implementation of the VolumeSnapshotProvider interface
type MockVolumeSnapshotProvider struct {
volumeSnapshots []*vsv1.VolumeSnapshot
}

func (v *MockVolumeSnapshotProvider) GetVolumeSnapshot(namespace, name string) (*vsv1.VolumeSnapshot, error) {
if len(v.volumeSnapshots) == 0 {
return nil, nil
}
vs := v.volumeSnapshots[0]
v.volumeSnapshots = v.volumeSnapshots[1:]
return vs, nil
}

func (v *MockVolumeSnapshotProvider) Add(s *vsv1.VolumeSnapshot) {
v.volumeSnapshots = append(v.volumeSnapshots, s)
}
2 changes: 1 addition & 1 deletion pkg/virt-controller/watch/snapshot/snapshot.go
@@ -227,7 +227,7 @@ func (ctrl *VMSnapshotController) updateVMSnapshotContent(content *snapshotv1.Vi

vsName := *volumeBackup.VolumeSnapshotName

volumeSnapshot, err := ctrl.GetVolumeSnapshot(content.Namespace, vsName)
if err != nil {
return 0, err
}
40 changes: 22 additions & 18 deletions pkg/virt-controller/watch/snapshot/snapshot_base.go
@@ -555,24 +555,6 @@ func (ctrl *VMSnapshotController) handlePVC(obj interface{}) {
}
}

func (ctrl *VMSnapshotController) getVolumeSnapshot(namespace, name string) (*vsv1.VolumeSnapshot, error) {
di := ctrl.dynamicInformerMap[volumeSnapshotCRD]
di.mutex.Lock()
defer di.mutex.Unlock()

if di.informer == nil {
return nil, nil
}

key := fmt.Sprintf("%s/%s", namespace, name)
obj, exists, err := di.informer.GetStore().GetByKey(key)
if !exists || err != nil {
return nil, err
}

return obj.(*vsv1.VolumeSnapshot).DeepCopy(), nil
}

func (ctrl *VMSnapshotController) getVolumeSnapshotClasses() []vsv1.VolumeSnapshotClass {
di := ctrl.dynamicInformerMap[volumeSnapshotClassCRD]
di.mutex.Lock()
@@ -639,3 +621,25 @@ func (ctrl *VMSnapshotController) deleteDynamicInformer(name string) (time.Durat

return 0, nil
}

type VolumeSnapshotProvider interface {
GetVolumeSnapshot(string, string) (*vsv1.VolumeSnapshot, error)
}

func (ctrl *VMSnapshotController) GetVolumeSnapshot(namespace, name string) (*vsv1.VolumeSnapshot, error) {
di := ctrl.dynamicInformerMap[volumeSnapshotCRD]
di.mutex.Lock()
defer di.mutex.Unlock()

if di.informer == nil {
return nil, nil
}

key := fmt.Sprintf("%s/%s", namespace, name)
obj, exists, err := di.informer.GetStore().GetByKey(key)
if !exists || err != nil {
return nil, err
}

return obj.(*vsv1.VolumeSnapshot).DeepCopy(), nil
}
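As an aside (not part of the diff): exporting GetVolumeSnapshot is what lets *VMSnapshotController satisfy the new VolumeSnapshotProvider interface, so application.go can wire vca.snapshotController into the restore controller while the unit tests substitute MockVolumeSnapshotProvider. A compile-time check of that relationship would look like:

// Hypothetical compile-time assertion; not present in the PR.
var _ VolumeSnapshotProvider = &VMSnapshotController{}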
40 changes: 40 additions & 0 deletions tests/storage/restore.go
@@ -808,6 +808,46 @@ var _ = SIGDescribe("[Serial]VirtualMachineRestore Tests", func() {
}
})

// This test is relevant for provisioners that round up the requested size of
// the PVC. Currently the vmsnapshot tests only run with ceph, which has this
// behavior. Running this test with another provisioner, or a change in ceph's
// behavior, will cause it to fail.
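// Concretely: the 1528Mi request below is expected to be rounded up by the
// provisioner to a 2Gi PVC, the VolumeSnapshot then reports a 2Gi RestoreSize,
// and the restored PVC must therefore request 2Gi rather than the original 1528Mi.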
DescribeTable("should restore a vm with restore size bigger then PVC size", func(restoreToNewVM bool) {
vm = tests.NewRandomVMWithDataVolumeAndUserDataInStorageClass(
cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros),
util.NamespaceTestDefault,
tests.BashHelloScript,
snapshotStorageClass,
)
quantity, err := resource.ParseQuantity("1528Mi")
Expect(err).ToNot(HaveOccurred())
vm.Spec.DataVolumeTemplates[0].Spec.PVC.Resources.Requests["storage"] = quantity
vm, vmi = createAndStartVM(vm)
expectedCapacity, err := resource.ParseQuantity("2Gi")
Expect(err).ToNot(HaveOccurred())
pvc, err := virtClient.CoreV1().PersistentVolumeClaims(vm.Namespace).Get(context.Background(), vm.Spec.DataVolumeTemplates[0].Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(pvc.Status.Capacity["storage"]).To(Equal(expectedCapacity))

doRestore("", console.LoginToCirros, false, 1, getTargetVMName(restoreToNewVM))

content, err := virtClient.VirtualMachineSnapshotContent(vm.Namespace).Get(context.Background(), *snapshot.Status.VirtualMachineSnapshotContentName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(content.Spec.VolumeBackups[0].PersistentVolumeClaim.Spec.Resources.Requests["storage"]).To(Equal(quantity))
vs, err := virtClient.KubernetesSnapshotClient().SnapshotV1().VolumeSnapshots(vm.Namespace).Get(context.Background(), *content.Spec.VolumeBackups[0].VolumeSnapshotName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(*vs.Status.RestoreSize).To(Equal(expectedCapacity))

pvc, err = virtClient.CoreV1().PersistentVolumeClaims(vm.Namespace).Get(context.Background(), restore.Status.Restores[0].PersistentVolumeClaimName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(pvc.Status.Capacity["storage"]).To(Equal(expectedCapacity))
Expect(pvc.Spec.Resources.Requests["storage"]).To(Equal(expectedCapacity))

},
Entry("to the same VM", false),
Entry("to a new VM", true),
)

DescribeTable("should restore a vm that boots from a datavolumetemplate", func(restoreToNewVM bool) {
vm, vmi = createAndStartVM(tests.NewRandomVMWithDataVolumeAndUserDataInStorageClass(
cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros),