From 8628388445b7d1949a6ec3ccb76f0f4e42db7bfa Mon Sep 17 00:00:00 2001 From: danfengl Date: Sun, 9 Apr 2023 01:42:08 +0000 Subject: [PATCH] [Cherry-pick 1.11]Add E2E test for schedule backup creation principle Signed-off-by: danfengl --- test/e2e/basic/pvc-selected-node-changing.go | 2 +- test/e2e/basic/storage-class-changing.go | 2 +- test/e2e/e2e_suite_test.go | 1 + test/e2e/pv-backup/pv-backup-filter.go | 2 +- test/e2e/schedule/schedule-backup-creation.go | 138 ++++++++++++++++++ test/e2e/schedule/schedule.go | 2 +- test/e2e/util/k8s/common.go | 36 +---- test/e2e/util/k8s/pod.go | 33 ++++- test/e2e/util/k8s/pvc.go | 2 +- 9 files changed, 178 insertions(+), 40 deletions(-) create mode 100644 test/e2e/schedule/schedule-backup-creation.go diff --git a/test/e2e/basic/pvc-selected-node-changing.go b/test/e2e/basic/pvc-selected-node-changing.go index 76796ded66..9b31966a32 100644 --- a/test/e2e/basic/pvc-selected-node-changing.go +++ b/test/e2e/basic/pvc-selected-node-changing.go @@ -87,7 +87,7 @@ func (p *PVCSelectedNodeChanging) CreateResources() error { p.oldNodeName = nodeName fmt.Printf("Create PVC on node %s\n", p.oldNodeName) pvcAnn := map[string]string{p.ann: nodeName} - _, err := CreatePodWithPVC(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, pvcAnn) + _, err := CreatePod(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, pvcAnn, nil) Expect(err).To(Succeed()) err = WaitForPods(context.Background(), p.Client, p.namespace, []string{p.podName}) Expect(err).To(Succeed()) diff --git a/test/e2e/basic/storage-class-changing.go b/test/e2e/basic/storage-class-changing.go index ae4379ecbe..4ae1d71c68 100644 --- a/test/e2e/basic/storage-class-changing.go +++ b/test/e2e/basic/storage-class-changing.go @@ -85,7 +85,7 @@ func (s *StorageClasssChanging) CreateResources() error { }) By(fmt.Sprintf("Create pod %s in namespace %s", s.podName, s.namespace), func() { - _, err := CreatePodWithPVC(s.Client, s.namespace, s.podName, s.srcStorageClass, "", []string{s.volume}, nil) + _, err := CreatePod(s.Client, s.namespace, s.podName, s.srcStorageClass, "", []string{s.volume}, nil, nil) Expect(err).To(Succeed()) }) By(fmt.Sprintf("Create ConfigMap %s in namespace %s", s.configmaptName, s.VeleroCfg.VeleroNamespace), func() { diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 4857416619..4ce7486d23 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -117,6 +117,7 @@ var _ = Describe("[Backups][BackupsSync] Backups in object storage are synced to var _ = Describe("[Schedule][BR][Pause][LongTime] Backup will be created periodly by schedule defined by a Cron expression", ScheduleBackupTest) var _ = Describe("[Schedule][OrederedResources] Backup resources should follow the specific order in schedule", ScheduleOrderedResources) +var _ = Describe("[Schedule][BackupCreation] Schedule controller wouldn't create a new backup when it still has pending or InProgress backup", ScheduleBackupCreationTest) var _ = Describe("[PrivilegesMgmt][SSR] Velero test on ssr object when controller namespace mix-ups", SSRTest) diff --git a/test/e2e/pv-backup/pv-backup-filter.go b/test/e2e/pv-backup/pv-backup-filter.go index af6d59cdc4..47785309ca 100644 --- a/test/e2e/pv-backup/pv-backup-filter.go +++ b/test/e2e/pv-backup/pv-backup-filter.go @@ -97,7 +97,7 @@ func (p *PVBackupFiltering) CreateResources() error { podName := fmt.Sprintf("pod-%d", i) pods = append(pods, podName) By(fmt.Sprintf("Create pod %s in namespace %s", podName, 
ns), func() {
-			pod, err := CreatePodWithPVC(p.Client, ns, podName, "e2e-storage-class", "", volumes, nil)
+			pod, err := CreatePod(p.Client, ns, podName, "e2e-storage-class", "", volumes, nil, nil)
 			Expect(err).To(Succeed())
 			ann := map[string]string{
 				p.annotation: volumesToAnnotation,
diff --git a/test/e2e/schedule/schedule-backup-creation.go b/test/e2e/schedule/schedule-backup-creation.go
new file mode 100644
index 0000000000..1202c0ef2a
--- /dev/null
+++ b/test/e2e/schedule/schedule-backup-creation.go
@@ -0,0 +1,138 @@
+package schedule
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"strings"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	. "github.com/vmware-tanzu/velero/test/e2e"
+	. "github.com/vmware-tanzu/velero/test/e2e/test"
+	. "github.com/vmware-tanzu/velero/test/e2e/util/k8s"
+	. "github.com/vmware-tanzu/velero/test/e2e/util/velero"
+)
+
+type ScheduleBackupCreation struct {
+	TestCase
+	namespace        string
+	ScheduleName     string
+	ScheduleArgs     []string
+	Period           int // Limitation: the unit is minutes only, and the value must divide 60 evenly
+	randBackupName   string
+	verifyTimes      int
+	volume           string
+	podName          string
+	pvcName          string
+	podAnn           map[string]string
+	podSleepDuration time.Duration
+}
+
+var ScheduleBackupCreationTest func() = TestFunc(&ScheduleBackupCreation{namespace: "sch1", TestCase: TestCase{NSBaseName: "schedule-backup-creation-test", UseVolumeSnapshots: false}})
+
+func (n *ScheduleBackupCreation) Init() error {
+	n.VeleroCfg = VeleroCfg
+	n.Client = *n.VeleroCfg.ClientToInstallVelero
+	n.Period = 3      // Unit is minute
+	n.verifyTimes = 5 // The more times we verify, the more confidence we have
+	podSleepDurationStr := "300s"
+	n.podSleepDuration, _ = time.ParseDuration(podSleepDurationStr)
+	n.TestMsg = &TestMSG{
+		Desc:      "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
+		FailedMSG: "Failed to verify schedule backup creation behavior",
+		Text:      "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
+	}
+	n.volume = "volume-1"
+	n.podName = "pod-1"
+	n.pvcName = "pvc-1"
+	n.podAnn = map[string]string{
+		"pre.hook.backup.velero.io/container": n.podName,
+		"pre.hook.backup.velero.io/command":   "[\"sleep\", \"" + podSleepDurationStr + "\"]",
+		"pre.hook.backup.velero.io/timeout":   "600s",
+	}
+	return nil
+}
+
+func (n *ScheduleBackupCreation) StartRun() error {
+	n.namespace = fmt.Sprintf("%s-%s", n.NSBaseName, "ns")
+	n.ScheduleName = n.ScheduleName + "schedule-" + UUIDgen.String()
+	n.RestoreName = n.RestoreName + "restore-ns-mapping-" + UUIDgen.String()
+
+	n.ScheduleArgs = []string{
+		"--include-namespaces", n.namespace,
+		"--schedule=*/" + fmt.Sprintf("%v", n.Period) + " * * * *",
+		"--default-volumes-to-fs-backup",
+	}
+	Expect(n.Period < 30).To(Equal(true))
+	return nil
+}
+func (p *ScheduleBackupCreation) CreateResources() error {
+	p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)
+	By(fmt.Sprintf("Create namespace %s", p.namespace), func() {
+		Expect(CreateNamespace(context.Background(), p.Client, p.namespace)).To(Succeed(),
+			fmt.Sprintf("Failed to create namespace %s", p.namespace))
+	})
+
+	By(fmt.Sprintf("Create pod %s in namespace %s", p.podName, p.namespace), func() {
+		_, err := CreatePod(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, nil, p.podAnn)
+		Expect(err).To(Succeed())
+		err = WaitForPods(context.Background(), p.Client, p.namespace, []string{p.podName})
+		Expect(err).To(Succeed())
+	})
+	return nil
+}
+
+func (n *ScheduleBackupCreation) Backup() error {
+	// Wait until the beginning of the given period before creating the schedule. This gives us
+	// a predictable window in which to wait for the first scheduled backup, and lets us verify
+	// that no scheduled backup was created between schedule creation and the first scheduled run.
+	By(fmt.Sprintf("Creating schedule %s ......\n", n.ScheduleName), func() {
+		for i := 0; i < n.Period*60/30; i++ {
+			time.Sleep(30 * time.Second)
+			now := time.Now().Minute()
+			triggerNow := now % n.Period
+			if triggerNow == 0 {
+				Expect(VeleroScheduleCreate(n.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName, n.ScheduleArgs)).To(Succeed(), func() string {
+					RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, "", "")
+					return "Fail to create schedule"
+				})
+				break
+			}
+		}
+	})
+
+	By("Delay one more minute to make sure the new backup was created in the given period", func() {
+		time.Sleep(1 * time.Minute)
+	})
+
+	By(fmt.Sprintf("Get backups every %d minutes; the backup count should increase by one each period\n", n.Period), func() {
+		for i := 1; i <= n.verifyTimes; i++ {
+			fmt.Printf("Start to sleep %v, iteration #%d...\n", n.podSleepDuration, i)
+			mi, _ := time.ParseDuration("60s")
+			time.Sleep(n.podSleepDuration + mi)
+			bMap := make(map[string]string)
+			backupsInfo, err := GetScheduledBackupsCreationTime(context.Background(), VeleroCfg.VeleroCLI, "default", n.ScheduleName)
+			Expect(err).To(Succeed())
+			Expect(len(backupsInfo) == i).To(Equal(true))
+			for index, bi := range backupsInfo {
+				bList := strings.Split(bi, ",")
+				fmt.Printf("Backup %d: %v\n", index, bList)
+				bMap[bList[0]] = bList[1]
+				_, err := time.Parse("2006-01-02 15:04:05 -0700 MST", bList[1])
+				Expect(err).To(Succeed())
+			}
+			if i == n.verifyTimes-1 {
+				backupInfo := backupsInfo[rand.Intn(len(backupsInfo))]
+				n.randBackupName = strings.Split(backupInfo, ",")[0]
+			}
+		}
+	})
+	return nil
+}
+
+func (n *ScheduleBackupCreation) Restore() error {
+	return nil
+}
diff --git a/test/e2e/schedule/schedule.go b/test/e2e/schedule/schedule.go
index 01f7234a0a..de3e06b117 100644
--- a/test/e2e/schedule/schedule.go
+++ b/test/e2e/schedule/schedule.go
@@ -31,7 +31,7 @@ func (n *ScheduleBackup) Init() error {
 	n.VeleroCfg = VeleroCfg
 	n.Client = *n.VeleroCfg.ClientToInstallVelero
 	n.Period = 3      // Unit is minute
-	n.verifyTimes = 5 // More verify times more confidence
+	n.verifyTimes = 5 // The more times we verify, the more confidence we have
 	n.TestMsg = &TestMSG{
 		Desc:      "Set up a scheduled backup defined by a Cron expression",
 		FailedMSG: "Failed to schedule a backup",
diff --git a/test/e2e/util/k8s/common.go b/test/e2e/util/k8s/common.go
index 730a4ee3b1..eb0ec43c67 100644
--- a/test/e2e/util/k8s/common.go
+++ b/test/e2e/util/k8s/common.go
@@ -25,7 +25,6 @@ import (
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	corev1 "k8s.io/api/core/v1"
-	corev1api "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -64,12 +63,12 @@ func WaitForPods(ctx context.Context, client TestClient, namespace string, pods
 		checkPod, err := client.ClientGo.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
 		if err != nil {
 			//Should ignore "etcdserver: request timed out" kind of errors, try to get pod status again before timeout.
- fmt.Println(errors.Wrap(err, fmt.Sprintf("Failed to verify pod %s/%s is %s, try again...\n", namespace, podName, corev1api.PodRunning))) + fmt.Println(errors.Wrap(err, fmt.Sprintf("Failed to verify pod %s/%s is %s, try again...\n", namespace, podName, corev1.PodRunning))) return false, nil } // If any pod is still waiting we don't need to check any more so return and wait for next poll interval - if checkPod.Status.Phase != corev1api.PodRunning { - fmt.Printf("Pod %s is in state %s waiting for it to be %s\n", podName, checkPod.Status.Phase, corev1api.PodRunning) + if checkPod.Status.Phase != corev1.PodRunning { + fmt.Printf("Pod %s is in state %s waiting for it to be %s\n", podName, checkPod.Status.Phase, corev1.PodRunning) return false, nil } } @@ -255,35 +254,6 @@ func GetPVByPodName(client TestClient, namespace, podName string) (string, error } return pv_value.Name, nil } -func CreatePodWithPVC(client TestClient, ns, podName, sc, pvcName string, volumeNameList []string, pvcAnn map[string]string) (*corev1.Pod, error) { - volumes := []corev1.Volume{} - for _, volume := range volumeNameList { - var _pvcName string - if pvcName == "" { - _pvcName = fmt.Sprintf("pvc-%s", volume) - } else { - _pvcName = pvcName - } - pvc, err := CreatePVC(client, ns, _pvcName, sc, pvcAnn) - if err != nil { - return nil, err - } - volumes = append(volumes, corev1.Volume{ - Name: volume, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvc.Name, - ReadOnly: false, - }, - }, - }) - } - pod, err := CreatePod(client, ns, podName, volumes) - if err != nil { - return nil, err - } - return pod, nil -} func CreateFileToPod(ctx context.Context, namespace, podName, volume, filename, content string) error { arg := []string{"exec", "-n", namespace, "-c", podName, podName, diff --git a/test/e2e/util/k8s/pod.go b/test/e2e/util/k8s/pod.go index 4caf299a33..a0daef03aa 100644 --- a/test/e2e/util/k8s/pod.go +++ b/test/e2e/util/k8s/pod.go @@ -26,7 +26,34 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func CreatePod(client TestClient, ns, name string, volumes []corev1.Volume) (*corev1.Pod, error) { +func CreatePod(client TestClient, ns, name, sc, pvcName string, volumeNameList []string, pvcAnn, ann map[string]string) (*corev1.Pod, error) { + if pvcName != "" && len(volumeNameList) != 1 { + return nil, errors.New("Volume name list should contain only 1 since PVC name is not empty") + } + volumes := []corev1.Volume{} + for _, volume := range volumeNameList { + var _pvcName string + if pvcName == "" { + _pvcName = fmt.Sprintf("pvc-%s", volume) + } else { + _pvcName = pvcName + } + pvc, err := CreatePVC(client, ns, _pvcName, sc, pvcAnn) + if err != nil { + return nil, err + } + + volumes = append(volumes, corev1.Volume{ + Name: volume, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + ReadOnly: false, + }, + }, + }) + } + vmList := []corev1.VolumeMount{} for _, v := range volumes { vmList = append(vmList, corev1.VolumeMount{ @@ -34,9 +61,11 @@ func CreatePod(client TestClient, ns, name string, volumes []corev1.Volume) (*co MountPath: "/" + v.Name, }) } + p := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: name, + Annotations: ann, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ diff --git a/test/e2e/util/k8s/pvc.go b/test/e2e/util/k8s/pvc.go index 32e6b95287..a12ef92b74 100644 --- a/test/e2e/util/k8s/pvc.go +++ b/test/e2e/util/k8s/pvc.go @@ 
-38,7 +38,7 @@ func CreatePVC(client TestClient, ns, name, sc string, ann map[string]string) (* }, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Gi"), + corev1.ResourceStorage: resource.MustParse("1Mi"), }, }, StorageClassName: &sc,
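
A minimal usage sketch of the consolidated CreatePod helper introduced by this patch; the client variable, namespace, pod/PVC names, and annotation values below are illustrative assumptions rather than values taken from the changed tests:

	// Assumed: client is a TestClient and the helpers from test/e2e/util/k8s are dot-imported.
	podAnn := map[string]string{
		"pre.hook.backup.velero.io/container": "pod-1",               // hypothetical pod name
		"pre.hook.backup.velero.io/command":   "[\"sleep\", \"300s\"]",
	}
	// sc selects the storage class for the PVC the helper creates; when pvcName is non-empty,
	// the volume name list must contain exactly one entry.
	pod, err := CreatePod(client, "demo-ns", "pod-1", "default", "pvc-1", []string{"volume-1"}, nil, podAnn)
	if err != nil {
		return err
	}
	return WaitForPods(context.Background(), client, "demo-ns", []string{pod.Name})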