diff --git a/test/e2e/storage/csi_mock/base.go b/test/e2e/storage/csi_mock/base.go index 0d56cb61f6d9..c822509ed290 100644 --- a/test/e2e/storage/csi_mock/base.go +++ b/test/e2e/storage/csi_mock/base.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "reflect" "strconv" "strings" "sync/atomic" @@ -72,6 +73,7 @@ const ( type csiCall struct { expectedMethod string expectedError codes.Code + expectedSecret map[string]string // This is a mark for the test itself to delete the tested pod *after* // this csiCall is received. deletePod bool @@ -98,6 +100,7 @@ type testParameters struct { fsGroupPolicy *storagev1.FSGroupPolicy enableSELinuxMount *bool enableRecoverExpansionFailure bool + enableCSINodeExpandSecret bool } type mockDriverSetup struct { @@ -127,6 +130,9 @@ const ( volumeSnapshotContentFinalizer = "snapshot.storage.kubernetes.io/volumesnapshotcontent-bound-protection" volumeSnapshotBoundFinalizer = "snapshot.storage.kubernetes.io/volumesnapshot-bound-protection" errReasonNotEnoughSpace = "node(s) did not have enough free storage" + + csiNodeExpandSecretKey = "csi.storage.k8s.io/node-expand-secret-name" + csiNodeExpandSecretNamespaceKey = "csi.storage.k8s.io/node-expand-secret-namespace" ) var ( @@ -247,6 +253,18 @@ func (m *mockDriverSetup) createPod(ctx context.Context, withVolume volumeType) f := m.f sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "") + if m.tp.enableCSINodeExpandSecret { + if sc.Parameters == nil { + parameters := map[string]string{ + csiNodeExpandSecretKey: "test-secret", + csiNodeExpandSecretNamespaceKey: f.Namespace.Name, + } + sc.Parameters = parameters + } else { + sc.Parameters[csiNodeExpandSecretKey] = "test-secret" + sc.Parameters[csiNodeExpandSecretNamespaceKey] = f.Namespace.Name + } + } scTest := testsuites.StorageClassTest{ Name: m.driver.GetDriverInfo().Name, Timeouts: f.Timeouts, @@ -815,6 +833,14 @@ func compareCSICalls(ctx context.Context, trackedCalls []string, expectedCallSeq if c.Method != 
expectedCall.expectedMethod || c.FullError.Code != expectedCall.expectedError { return allCalls, i, fmt.Errorf("Unexpected CSI call %d: expected %s (%d), got %s (%d)", i, expectedCall.expectedMethod, expectedCall.expectedError, c.Method, c.FullError.Code) } + + // if the secret is not nil, compare it + if expectedCall.expectedSecret != nil { + if !reflect.DeepEqual(expectedCall.expectedSecret, c.Request.Secret) { + return allCalls, i, fmt.Errorf("Unexpected secret: expected %v, got %v", expectedCall.expectedSecret, c.Request.Secret) + } + } + } if len(calls) > len(expectedCallSequence) { return allCalls, len(expectedCallSequence), fmt.Errorf("Received %d unexpected CSI driver calls", len(calls)-len(expectedCallSequence)) diff --git a/test/e2e/storage/csi_mock/csi_volume_expansion.go b/test/e2e/storage/csi_mock/csi_volume_expansion.go index 95921fb1e7fe..8ea7dce04e21 100644 --- a/test/e2e/storage/csi_mock/csi_volume_expansion.go +++ b/test/e2e/storage/csi_mock/csi_volume_expansion.go @@ -201,6 +201,130 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() { }) } }) + ginkgo.Context("CSI online volume expansion with secret[Feature:CSINodeExpandSecret]", func() { + var stringSecret = map[string]string{ + "username": "admin", + "password": "t0p-Secret", + } + trackedCalls := []string{ + "NodeExpandVolume", + } + tests := []struct { + name string + disableAttach bool + expectedCalls []csiCall + + // Called for each NodeExpandVolume call, with the counter incremented atomically before + // the invocation (i.e. the first value will be 1).
+ nodeExpandHook func(counter int64) error + }{ + { + name: "should expand volume without restarting pod if attach=on, nodeExpansion=on, csiNodeExpandSecret=on", + expectedCalls: []csiCall{ + {expectedMethod: "NodeExpandVolume", expectedError: codes.OK, expectedSecret: stringSecret}, + }, + }, + } + for _, t := range tests { + test := t + ginkgo.It(test.name, func(ctx context.Context) { + var ( + err error + hooks *drivers.Hooks + secretName = "test-secret" + secret = &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: secretName, + }, + StringData: stringSecret, + } + ) + if test.nodeExpandHook != nil { + hooks = createPreHook("NodeExpandVolume", test.nodeExpandHook) + } + params := testParameters{enableResizing: true, enableNodeExpansion: true, enableCSINodeExpandSecret: true, hooks: hooks} + if test.disableAttach { + params.disableAttach = true + params.registerDriver = true + } + + m.init(ctx, params) + ginkgo.DeferCleanup(m.cleanup) + + if secret, err := m.cs.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + framework.Failf("unable to create test secret %s: %v", secret.Name, err) + } + + sc, pvc, pod := m.createPod(ctx, pvcReference) + gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing") + + if !*sc.AllowVolumeExpansion { + framework.Fail("failed creating sc with allowed expansion") + } + if sc.Parameters == nil { + framework.Fail("failed creating sc with secret") + } + if _, ok := sc.Parameters[csiNodeExpandSecretKey]; !ok { + framework.Failf("creating sc without %s", csiNodeExpandSecretKey) + } + if _, ok := sc.Parameters[csiNodeExpandSecretNamespaceKey]; !ok { + framework.Failf("creating sc without %s", csiNodeExpandSecretNamespaceKey) + } + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) + framework.ExpectNoError(err, "Failed to start pod1: %v", err) + + pv, err := m.cs.CoreV1().PersistentVolumes().Get(ctx, 
pvc.Spec.VolumeName, metav1.GetOptions{}) + if err != nil { + framework.Failf("failed to get pv %s, %v", pvc.Spec.VolumeName, err) + } + if pv.Spec.CSI == nil || pv.Spec.CSI.NodeExpandSecretRef == nil { + framework.Fail("creating pv without 'NodeExpandSecretRef'") + } + + ginkgo.By("Expanding current pvc") + newSize := resource.MustParse("6Gi") + newPVC, err := testsuites.ExpandPVCSize(ctx, pvc, newSize, m.cs) + framework.ExpectNoError(err, "While updating pvc for more size") + pvc = newPVC + gomega.Expect(pvc).NotTo(gomega.BeNil()) + + pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] + if pvcSize.Cmp(newSize) != 0 { + framework.Failf("error updating pvc size %q", pvc.Name) + } + + ginkgo.By("Waiting for persistent volume resize to finish") + err = testsuites.WaitForControllerVolumeResize(ctx, pvc, m.cs, csiResizeWaitPeriod) + framework.ExpectNoError(err, "While waiting for PV resize to finish") + + ginkgo.By("Waiting for all remaining expected CSI calls") + err = wait.Poll(time.Second, csiResizeWaitPeriod, func() (done bool, err error) { + _, index, err := compareCSICalls(ctx, trackedCalls, test.expectedCalls, m.driver.GetCalls) + if err != nil { + return true, err + } + if index == 0 { + // No CSI call received yet + return false, nil + } + if len(test.expectedCalls) == index { + // all calls received + return true, nil + } + return false, nil + }) + framework.ExpectNoError(err, "while waiting for all CSI calls") + + ginkgo.By("Waiting for PVC resize to finish") + pvc, err = testsuites.WaitForFSResize(ctx, pvc, m.cs) + framework.ExpectNoError(err, "while waiting for PVC to finish") + + pvcConditions := pvc.Status.Conditions + framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions") + }) + } + }) ginkgo.Context("CSI online volume expansion", func() { tests := []struct { name string diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index 19661d56c037..4c4a45cf041c 100644 --- 
a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -378,6 +378,7 @@ type MockCSICall struct { Method string Request struct { VolumeContext map[string]string `json:"volume_context"` + Secret map[string]string `json:"secret"` } FullError struct { Code codes.Code `json:"code"`