add test for vsphere driver snapshot configuration
RomanBednar committed Apr 17, 2024
1 parent f1a21f4 commit a66f1f6
Showing 1 changed file with 358 additions and 0 deletions.
358 changes: 358 additions & 0 deletions test/extended/storage/driver_configuration.go
@@ -0,0 +1,358 @@
package storage

import (
"context"
"fmt"
"strings"
"time"

g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
operatorv1 "github.com/openshift/api/operator/v1"
exutil "github.com/openshift/origin/test/extended/util"
"gopkg.in/ini.v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/test/e2e/framework"
e2e "k8s.io/kubernetes/test/e2e/framework"
k8simage "k8s.io/kubernetes/test/utils/image"
)

var (
globalSnapshotOption = "globalMaxSnapshotsPerBlockVolume"
vsanSnapshotOption = "granularMaxSnapshotsPerBlockVolumeInVSAN"
vvolSnapshotOption = "granularMaxSnapshotsPerBlockVolumeInVVOL"
projectName = "csi-driver-configuration"
providerName = "csi.vsphere.vmware.com"
)
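// The names above are the clustercsidriver driverConfig field names; the tests below expect the
// operator to render them into the [Snapshot] section of the driver's cloud.conf under the
// corresponding hyphenated keys. Illustrative snippet only (values taken from the "all at once"
// test below, not from a real cluster):
//
//   [Snapshot]
//   global-max-snapshots-per-block-volume = 5
//   granular-max-snapshots-per-block-volume-vsan = 10
//   granular-max-snapshots-per-block-volume-vvol = 15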

// This is [Serial] because it modifies clustercsidriver
var _ = g.Describe("[sig-storage][Feature:VSphereDriverConfiguration][Serial] vSphere CSI Driver Configuration", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI(projectName)

var (
originalDriverConfigSpec *operatorv1.CSIDriverConfigSpec
clusterCSIDriver *operatorv1.ClusterCSIDriver
testSkipped bool
err error

beforeEach = func(oc *exutil.CLI) {
//TODO: remove when GA
if !exutil.IsTechPreviewNoUpgrade(oc) {
testSkipped = true
g.Skip("this test is only expected to work with TechPreviewNoUpgrade clusters")
}

if !framework.ProviderIs("vsphere") {
testSkipped = true
g.Skip("this test is only expected to work with vSphere clusters")
}

originalClusterCSIDriver, err := oc.AdminOperatorClient().OperatorV1().ClusterCSIDrivers().Get(context.Background(), providerName, metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
originalDriverConfigSpec = originalClusterCSIDriver.Spec.DriverConfig.DeepCopy()
e2e.Logf("storing original clustercsidriver driverConfig: %+v", originalDriverConfigSpec)
}

afterEach = func(oc *exutil.CLI) {
if testSkipped {
return
}

clusterCSIDriver, err = oc.AdminOperatorClient().OperatorV1().ClusterCSIDrivers().Get(context.Background(), providerName, metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
clusterCSIDriver.Spec.DriverConfig = *originalDriverConfigSpec
_, err = oc.AdminOperatorClient().OperatorV1().ClusterCSIDrivers().Update(context.Background(), clusterCSIDriver, metav1.UpdateOptions{})
if err != nil {
e2e.Failf("failed to restore original ClusterCSIDriver config: %v", err)
}
}
)

g.Context(fmt.Sprintf("setting %s option in clusterCSIDriver", globalSnapshotOption), func() {
var (
cloudConfigOption = "global-max-snapshots-per-block-volume"
cloudConfigValue = 4
)

g.BeforeEach(func() {
beforeEach(oc)
})

g.AfterEach(func() {
afterEach(oc)
})

g.It("should update config map and allow configured number of snapshots to be created", func() {
g.By(fmt.Sprintf("setting %s in cloud.conf", cloudConfigOption))

e2e.Logf("updating %s to %d in clustercsidriver", globalSnapshotOption, cloudConfigValue)
setClusterCSIDriverSnapshotOptions(oc, globalSnapshotOption, cloudConfigValue)

e2e.Logf("checking cloud.conf for %s", globalSnapshotOption)
o.Eventually(func() error {
return loadAndCheckCloudConf(oc, "Snapshot", cloudConfigOption, cloudConfigValue)
}, time.Minute, time.Second).Should(o.Succeed())

pvc, err := createTestPVC(oc, oc.Namespace(), "test-pvc", "1Gi")
o.Expect(err).NotTo(o.HaveOccurred())

_, err = createTestPod(oc, pvc.Name, oc.Namespace())
o.Expect(err).NotTo(o.HaveOccurred())

// wait for the PVC to be bound before creating snapshots
o.Eventually(func() error {
boundPVC, err := oc.AdminKubeClient().CoreV1().PersistentVolumeClaims(oc.Namespace()).Get(context.Background(), "test-pvc", metav1.GetOptions{})
if err != nil {
return err
}
if boundPVC.Status.Phase != v1.ClaimBound {
return fmt.Errorf("PVC test-pvc is not bound yet, current phase: %s", boundPVC.Status.Phase)
}
return nil
}, 5*time.Minute, time.Second).Should(o.Succeed())

for i := 0; i < cloudConfigValue; i++ {
err := createSnapshot(oc, oc.Namespace(), fmt.Sprintf("test-snapshot-%d", i), "test-pvc")
o.Expect(err).NotTo(o.HaveOccurred())
}

// the next snapshot is over the configured limit: the VolumeSnapshot object is still created,
// but the underlying snapshot must fail and never become ready to use
err = createSnapshot(oc, oc.Namespace(), "test-snapshot-failed", "test-pvc")
o.Expect(err).NotTo(o.HaveOccurred())

// the snapshot status is populated asynchronously, so poll until the snapshot reports a failure
o.Eventually(func() error {
readyToUse, err := oc.Run("get").Args("volumesnapshot/test-snapshot-failed", "-o", "jsonpath={.status.readyToUse}").Output()
if err != nil {
return err
}
errMsg, err := oc.Run("get").Args("volumesnapshot/test-snapshot-failed", "-o", "jsonpath={.status.error.message}").Output()
if err != nil {
return err
}
if !strings.Contains(errMsg, "failed") || readyToUse == "true" {
return fmt.Errorf("VolumeSnapshot test-snapshot-failed should have failed and must not be ready to use (readyToUse=%q, error=%q)", readyToUse, errMsg)
}
return nil
}, 5*time.Minute, time.Second).Should(o.Succeed())

})
})

g.Context(fmt.Sprintf("setting %s option in clusterCSIDriver", vsanSnapshotOption), func() {
var (
cloudConfigOption = "granular-max-snapshots-per-block-volume-vsan"
cloudConfigValue = 4
)

g.BeforeEach(func() {
beforeEach(oc)
})

g.AfterEach(func() {
afterEach(oc)
})

g.It("should update config map", func() {
g.By(fmt.Sprintf("setting %s in cloud.conf", cloudConfigOption))

e2e.Logf("updating %s to %d in clustercsidriver", vsanSnapshotOption, cloudConfigValue)
setClusterCSIDriverSnapshotOptions(oc, vsanSnapshotOption, cloudConfigValue)

e2e.Logf("checking cloud.conf for %s", cloudConfigOption)
o.Eventually(func() error {
return loadAndCheckCloudConf(oc, "Snapshot", cloudConfigOption, cloudConfigValue)
}, time.Minute, time.Second).Should(o.Succeed())
})
})

g.Context(fmt.Sprintf("setting %s option in clusterCSIDriver", vvolSnapshotOption), func() {
var (
cloudConfigOption = "granular-max-snapshots-per-block-volume-vvol"
cloudConfigValue = 4
)

g.BeforeEach(func() {
beforeEach(oc)
})

g.AfterEach(func() {
afterEach(oc)
})

g.It("should update config map", func() {
g.By(fmt.Sprintf("setting %s in cloud.conf", cloudConfigOption))

e2e.Logf("updating %s to %d in clustercsidriver", vvolSnapshotOption, cloudConfigValue)
setClusterCSIDriverSnapshotOptions(oc, vvolSnapshotOption, cloudConfigValue)

e2e.Logf("checking cloud.conf for %s", cloudConfigOption)
o.Eventually(func() error {
return loadAndCheckCloudConf(oc, "Snapshot", cloudConfigOption, cloudConfigValue)
}, time.Minute, time.Second).Should(o.Succeed())
})

})

//test all snapshot options at once
g.Context("setting all snapshot options in clusterCSIDriver at once", func() {
var (
clusterCSIDriverOptions = map[string]int{
globalSnapshotOption: 5,
vsanSnapshotOption: 10,
vvolSnapshotOption: 15,
}
cloudConfigOptions = map[string]int{
"global-max-snapshots-per-block-volume": 5,
"granular-max-snapshots-per-block-volume-vsan": 10,
"granular-max-snapshots-per-block-volume-vvol": 15,
}
)

g.BeforeEach(func() {
beforeEach(oc)
})

g.AfterEach(func() {
afterEach(oc)
})

g.It("should update config map", func() {
for option, value := range clusterCSIDriverOptions {
e2e.Logf("updating %s to %d in clustercsidriver", option, value)
setClusterCSIDriverSnapshotOptions(oc, option, value)
}

for option, value := range cloudConfigOptions {
o.Eventually(func() error {
return loadAndCheckCloudConf(oc, "Snapshot", option, value)
}, time.Minute, time.Second).Should(o.Succeed())
}
})

})
})

func setClusterCSIDriverSnapshotOptions(oc *exutil.CLI, snapshotOptions string, value int) {
patch := []byte(fmt.Sprintf("{\"spec\":{\"driverConfig\":{\"vSphere\":{\"%s\": %d}}}}", snapshotOptions, value))
_, err := oc.AdminOperatorClient().OperatorV1().ClusterCSIDrivers().Patch(context.Background(), providerName, types.MergePatchType, patch, metav1.PatchOptions{})
if err != nil {
e2e.Failf("failed to patch ClusterCSIDriver: %v", err)
}
}
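// For example, setting globalMaxSnapshotsPerBlockVolume to 4 sends a merge patch of the form
// (roughly what `oc patch clustercsidriver csi.vsphere.vmware.com --type=merge -p '<patch>'`
// would do on the CLI):
//
//   {"spec":{"driverConfig":{"vSphere":{"globalMaxSnapshotsPerBlockVolume": 4}}}}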

func loadAndCheckCloudConf(oc *exutil.CLI, sectionName string, keyName string, expectedValue int) error {
cm, err := oc.AdminKubeClient().CoreV1().ConfigMaps("openshift-cluster-csi-drivers").Get(context.Background(), "vsphere-csi-config", metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get ConfigMap vsphere-csi-config: %v", err)
}

cloudConfData, ok := cm.Data["cloud.conf"]
if !ok {
return fmt.Errorf("cloud.conf key not found in ConfigMap")
}

cfg, err := ini.Load([]byte(cloudConfData))
if err != nil {
return fmt.Errorf("failed to parse cloud.conf: %v", err)
}

section, err := cfg.GetSection(sectionName)
if err != nil {
return fmt.Errorf("section %s not found in cloud.conf: %v", sectionName, err)
}

key, err := section.GetKey(keyName)
if err != nil {
return fmt.Errorf("key %s not found in section %s: %v", keyName, sectionName, err)
}

if key.String() != fmt.Sprintf("%d", expectedValue) {
return fmt.Errorf("key %s in section %s has value %q, expected %d", keyName, sectionName, key.String(), expectedValue)
}

return nil
}
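// Typical usage from the tests above, polling until the operator has synced the new value into
// the vsphere-csi-config ConfigMap:
//
//   o.Eventually(func() error {
//       return loadAndCheckCloudConf(oc, "Snapshot", "global-max-snapshots-per-block-volume", 4)
//   }, time.Minute, time.Second).Should(o.Succeed())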

func createTestPod(oc *exutil.CLI, pvcName string, namespace string) (*v1.Pod, error) {
allowPrivEsc := false
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod-driver-conf",
Namespace: namespace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test",
Image: k8simage.GetE2EImage(k8simage.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
Name: "pvc-data",
MountPath: "/mnt",
},
},
SecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: &allowPrivEsc,
SeccompProfile: &v1.SeccompProfile{
Type: v1.SeccompProfileTypeRuntimeDefault,
},
Capabilities: &v1.Capabilities{
Drop: []v1.Capability{"ALL"},
},
},
},
},
Volumes: []v1.Volume{
{
Name: "pvc-data",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
},
},
},
},
},
}

return oc.AdminKubeClient().CoreV1().Pods(namespace).Create(context.Background(), pod, metav1.CreateOptions{})
}

func createTestPVC(oc *exutil.CLI, namespace string, pvcName string, volumeSize string) (*v1.PersistentVolumeClaim, error) {
e2e.Logf("creating PVC %s in namespace %s with size %s", pvcName, namespace, volumeSize)

pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: pvcName,
Namespace: namespace,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{"ReadWriteOnce"},
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse(volumeSize),
},
},
},
}

return oc.AdminKubeClient().CoreV1().PersistentVolumeClaims(namespace).Create(context.Background(), pvc, metav1.CreateOptions{})
}

func createSnapshot(oc *exutil.CLI, namespace string, snapshotName string, pvcName string) error {
snapshot := fmt.Sprintf(`
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: %s
  namespace: %s
spec:
  source:
    persistentVolumeClaimName: %s
`, snapshotName, namespace, pvcName)

err := oc.AsAdmin().Run("apply").Args("-f", "-").InputString(snapshot).Execute()
if err != nil {
return fmt.Errorf("failed to create snapshot: %v", err)
}

return nil
}
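// Note: createSnapshot only creates the VolumeSnapshot object; whether the underlying CSI
// snapshot succeeded is reported asynchronously via .status.readyToUse and .status.error.message,
// which is what the over-the-limit check in the test above polls for.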
