Automated cherry pick of #107173: Fix order of commands in the snapshot tests for persistent #107341
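This cherry-pick reorders the steps of the persistent-volume snapshot test: pod-deletion cleanup is now registered from the test body instead of inside init(), the source pod is stopped first so NodeUnpublishVolume is forced, and the snapshot resource is created only after the node has released the volume. The sketch below illustrates the reverse-order (LIFO) cleanup registration that the reordering relies on; it is a minimal standalone example with illustrative names, not the e2e framework's actual API.

```go
package main

import "fmt"

func main() {
	// cleanupSteps mirrors the test suite's pattern: steps are appended as
	// resources are created and later executed in reverse order, like a defer stack.
	var cleanupSteps []func()
	register := func(name string) {
		cleanupSteps = append(cleanupSteps, func() { fmt.Println("cleanup:", name) })
	}

	register("delete source pod")         // registered first, runs last
	register("delete snapshot resource")  // registered second, runs first

	// Run cleanup in reverse registration order, so the snapshot is removed
	// before the pod whose data it captured.
	for i := len(cleanupSteps) - 1; i >= 0; i-- {
		cleanupSteps[i]()
	}
}
```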

Merged
68 changes: 36 additions & 32 deletions test/e2e/storage/testsuites/snapshottable.go
@@ -145,11 +145,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
command := fmt.Sprintf("echo '%s' > %s", originalMntTestData, datapath)

pod = StartInPodWithVolumeSource(cs, *volumeResource.VolSource, f.Namespace.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
cleanupSteps = append(cleanupSteps, func() {
e2epod.DeletePodWithWait(cs, pod)
})

// At this point a pod is running with a PVC. How to proceed depends on which test is running.
// At this point a pod is created with a PVC. How to proceed depends on which test is running.
}

cleanup := func() {
@@ -178,6 +175,11 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
}
init()

// delete the pod at the end of the test
cleanupSteps = append(cleanupSteps, func() {
e2epod.DeletePodWithWait(cs, pod)
})

// We can test snapshotting of generic
// ephemeral volumes by creating the snapshot
// while the pod is running (online). We cannot do it after pod deletion,
@@ -308,36 +310,31 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
}
init()

pvc = volumeResource.Pvc
sc = volumeResource.Sc

// The pod should be in the Success state.
ginkgo.By("[init] check pod success")
pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to fetch pod: %v", err)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))
// Sync the pod to know additional fields.
pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "check pod after it terminated")
framework.ExpectNoError(err, "Failed to fetch pod: %v", err)

// Get new copy of the claim
ginkgo.By("[init] checking the claim")
pvcName := volumeResource.Pvc.Name
pvcNamespace := volumeResource.Pvc.Namespace

parameters := map[string]string{}
sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters)
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
vs := sr.Vs
vsc := sr.Vsclass

err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
// Get new copy of the claim.
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err)

pvc, err = cs.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
framework.ExpectNoError(err, "get PVC")
claimSize = pvc.Spec.Resources.Requests.Storage().String()
sc = volumeResource.Sc

// Get the bound PV
// Get the bound PV.
ginkgo.By("[init] checking the PV")
pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err)

// Delete the pod to force NodeUnpublishVolume (unlike the ephemeral case where the pod is deleted at the end of the test).
ginkgo.By("[init] deleting the pod")
StopPod(cs, pod)

@@ -386,6 +383,15 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
framework.Failf("timed out waiting for node=%s to not use the volume=%s", nodeName, volumeName)
}

// Take the snapshot.
parameters := map[string]string{}
sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvc.Name, pvc.Namespace, f.Timeouts, parameters)
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
vs := sr.Vs
vsc := sr.Vsclass

// Get new copy of the snapshot
ginkgo.By("checking the snapshot")
vs, err = dc.Resource(storageutils.SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{})
@@ -400,9 +406,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
snapshotContentSpec := vscontent.Object["spec"].(map[string]interface{})
volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{})

var restoredPVC *v1.PersistentVolumeClaim
var restoredPod *v1.Pod

// Check SnapshotContent properties
ginkgo.By("checking the SnapshotContent")
// PreprovisionedCreatedSnapshot do not need to set volume snapshot class name
@@ -413,6 +416,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
framework.ExpectEqual(volumeSnapshotRef["namespace"], vs.GetNamespace())

ginkgo.By("Modifying source data test")
var restoredPVC *v1.PersistentVolumeClaim
var restoredPod *v1.Pod
modifiedMntTestData := fmt.Sprintf("modified data from %s namespace", pvc.GetNamespace())

ginkgo.By("modifying the data in the source PVC")
@@ -421,6 +426,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
RunInPodWithVolume(cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection)

ginkgo.By("creating a pvc from the snapshot")
claimSize = pvc.Spec.Resources.Requests.Storage().String()
restoredPVC = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: claimSize,
StorageClassName: &(sc.Name),
@@ -451,11 +457,9 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
StopPod(cs, restoredPod)
})
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
if pattern.VolType != storageframework.GenericEphemeralVolume {
commands := e2evolume.GenerateReadFileCmd(datapath)
_, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
framework.ExpectNoError(err)
}
commands := e2evolume.GenerateReadFileCmd(datapath)
_, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
framework.ExpectNoError(err)

ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")
