
Merge pull request #160 from andyzhangx/sc-param
feat: add Premium_LRS support
andyzhangx committed Jun 5, 2020
2 parents 322a5b1 + 81c3cd6 commit 4a215e1
Showing 13 changed files with 57 additions and 48 deletions.
2 changes: 1 addition & 1 deletion deploy/example/pv-blobfuse-csi.yaml
@@ -15,7 +15,7 @@ spec:
csi:
driver: blobfuse.csi.azure.com
readOnly: false
- volumeHandle: arbitrary-volumeid
+ volumeHandle: unique-volumeid # make sure this volume ID is unique in the whole cluster
volumeAttributes:
containerName: EXISTING_CONTAINER_NAME
nodeStageSecretRef:
2 changes: 1 addition & 1 deletion deploy/example/storageclass-blobfuse-csi-mountoptions.yaml
@@ -5,7 +5,7 @@ metadata:
name: blobfuse.csi.azure.com
provisioner: blobfuse.csi.azure.com
parameters:
- skuName: Standard_LRS # available values: Standard_LRS, Standard_GRS, Standard_RAGRS
+ skuName: Standard_LRS # available values: Standard_LRS, Premium_LRS, Standard_GRS, Standard_RAGRS
reclaimPolicy: Retain # if set to "Delete", the container will be removed after PVC deletion
volumeBindingMode: Immediate
mountOptions:
2 changes: 1 addition & 1 deletion deploy/example/storageclass-blobfuse-csi.yaml
@@ -5,6 +5,6 @@ metadata:
name: blobfuse.csi.azure.com
provisioner: blobfuse.csi.azure.com
parameters:
- skuName: Standard_LRS # available values: Standard_LRS, Standard_GRS, Standard_RAGRS
+ skuName: Standard_LRS # available values: Standard_LRS, Premium_LRS, Standard_GRS, Standard_RAGRS
reclaimPolicy: Retain # if set to "Delete", the container will be removed after PVC deletion
volumeBindingMode: Immediate
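
The two storage class examples above only change a comment, but they are the user-facing surface of this feature: setting `skuName: Premium_LRS` is all a cluster admin has to do. For reference, a minimal sketch of a PVC that would provision through such a class, written against the upstream Kubernetes API types of this era (the claim name and requested size are illustrative, not taken from this repo):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The storage class name matches the metadata.name used in the examples
	// above; the claim name and size below are illustrative assumptions.
	scName := "blobfuse.csi.azure.com"
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "pvc-blobfuse"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
			StorageClassName: &scName,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("10Gi"),
				},
			},
		},
	}
	fmt.Println(pvc.Name, *pvc.Spec.StorageClassName)
}
```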
4 changes: 2 additions & 2 deletions docs/driver-parameters.md
@@ -8,9 +8,9 @@
> get a `mountOptions` example [here](../deploy/example/storageclass-blobfuse-csi-mountoptions.yaml)
- Name | Meaning | Example | Mandatory | Default value
+ Name | Meaning | Example | Mandatory | Default value
--- | --- | --- | --- | ---
- skuName | blobfuse storage account type (alias: `storageAccountType`) | `Standard_LRS`, `Standard_GRS`, `Standard_RAGRS` | No | `Standard_LRS`
+ skuName | blobfuse storage account type (alias: `storageAccountType`) | `Standard_LRS`, `Premium_LRS`, `Standard_GRS`, `Standard_RAGRS` | No | `Standard_LRS`
location | specify the location in which the blobfuse share will be created | `eastus`, `westus`, etc. | No | if empty, driver will use the same location as the current k8s cluster
resourceGroup | specify the existing resource group name where the container is located | existing resource group name | No | if empty, driver will use the same resource group as the current k8s cluster
storageAccount | specify the storage account name in which the blobfuse share will be created | STORAGE_ACCOUNT_NAME | No | if empty, driver will find a suitable storage account that matches `skuName` in the same resource group; if a storage account name is provided, that storage account must already exist, otherwise there will be an error
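As a companion to the parameter table, here is a hedged sketch of how a driver could resolve `skuName` and its `storageAccountType` alias from a CreateVolume parameter map; the helper is hypothetical, for illustration only, not the repo's actual parsing code:

```go
package main

import (
	"fmt"
	"strings"
)

// storageAccountTypeFrom resolves the documented skuName parameter, honoring
// its storageAccountType alias and the Standard_LRS default from the table.
func storageAccountTypeFrom(parameters map[string]string) string {
	for k, v := range parameters {
		switch strings.ToLower(k) {
		case "skuname", "storageaccounttype":
			return v
		}
	}
	return "Standard_LRS" // documented default when the parameter is omitted
}

func main() {
	fmt.Println(storageAccountTypeFrom(map[string]string{"storageAccountType": "Premium_LRS"})) // Premium_LRS
	fmt.Println(storageAccountTypeFrom(map[string]string{}))                                    // Standard_LRS
}
```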
6 changes: 5 additions & 1 deletion pkg/blobfuse/controllerserver.go
@@ -79,7 +79,11 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest)
resourceGroup = d.cloud.ResourceGroup
}

- account, accountKey, err := d.cloud.EnsureStorageAccount(accountName, storageAccountType, string(storage.StorageV2), resourceGroup, location, blobfuseAccountNamePrefix)
+ accountKind := string(storage.StorageV2)
+ if strings.EqualFold(storageAccountType, "Premium_LRS") {
+ 	accountKind = string(storage.BlockBlobStorage)
+ }
+ account, accountKey, err := d.cloud.EnsureStorageAccount(accountName, storageAccountType, accountKind, resourceGroup, location, blobfuseAccountNamePrefix)
if err != nil {
return nil, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
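The new branch above is the core of the commit: Premium_LRS blob storage is only offered on BlockBlobStorage accounts, so the driver switches the account kind for that SKU and keeps the general-purpose StorageV2 kind for everything else. A standalone sketch of the same rule (the SDK import path is an assumption meant to match what controllerserver.go already imports; the pinned API version may differ):

```go
package main

import (
	"fmt"
	"strings"

	// Assumed to be the same Azure SDK storage-management package that
	// controllerserver.go imports; the pinned API version may differ.
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
)

// accountKindFor mirrors the kind selection added in CreateVolume:
// Premium_LRS requires a BlockBlobStorage account, all other SKUs keep
// the general-purpose StorageV2 kind.
func accountKindFor(storageAccountType string) string {
	if strings.EqualFold(storageAccountType, "Premium_LRS") {
		return string(storage.BlockBlobStorage)
	}
	return string(storage.StorageV2)
}

func main() {
	for _, sku := range []string{"Standard_LRS", "Premium_LRS", "premium_lrs"} {
		fmt.Printf("%s -> %s\n", sku, accountKindFor(sku))
	}
}
```

`strings.EqualFold` makes the check case-insensitive, so `premium_lrs` in a storage class parameter is accepted as well.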
6 changes: 0 additions & 6 deletions test/e2e/driver/blobfuse_csi_driver.go
@@ -100,9 +100,3 @@ func (d *blobFuseCSIDriver) GetPreProvisionStorageClass(parameters map[string]st
generateName := fmt.Sprintf("%s-%s-pre-provisioned-sc-", namespace, provisioner)
return getStorageClass(generateName, provisioner, parameters, mountOptions, reclaimPolicy, bindingMode, nil)
}

- func GetParameters() map[string]string {
- 	return map[string]string{
- 		"skuName": "Standard_LRS",
- 	}
- }
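
Deleting `GetParameters` removes the last hard-coded `Standard_LRS` default from the e2e helpers; as the test diffs below show, each test now declares its own `StorageClassParameters` inline. A toy illustration of the new shape (stand-in types, not the repo's real `testsuites` package):

```go
package main

import "fmt"

// dynamicVolumeTest is a toy stand-in for the updated tester structs:
// each test carries its own StorageClassParameters instead of sharing
// a package-level GetParameters() default.
type dynamicVolumeTest struct {
	StorageClassParameters map[string]string
}

func (t dynamicVolumeTest) Run() {
	// In the real suite the map is handed to GetDynamicProvisionStorageClass;
	// here we just show that each test owns its parameters.
	fmt.Println("provisioning with skuName =", t.StorageClassParameters["skuName"])
}

func main() {
	// The updated e2e tests spread these SKUs across the individual specs.
	for _, sku := range []string{"Standard_LRS", "Premium_LRS", "Standard_GRS", "Standard_RAGRS"} {
		dynamicVolumeTest{StorageClassParameters: map[string]string{"skuName": sku}}.Run()
	}
}
```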
30 changes: 18 additions & 12 deletions test/e2e/dynamic_provisioning_test.go
@@ -67,8 +67,9 @@ var _ = ginkgo.Describe("[blobfuse-csi-e2e] Dynamic Provisioning", func() {
},
}
test := testsuites.DynamicallyProvisionedCmdVolumeTest{
- CSIDriver: testDriver,
- Pods:      pods,
+ CSIDriver:              testDriver,
+ Pods:                   pods,
+ StorageClassParameters: map[string]string{"skuName": "Standard_LRS"},
}
test.Run(cs, ns)
})
@@ -94,6 +95,7 @@ var _ = ginkgo.Describe("[blobfuse-csi-e2e] Dynamic Provisioning", func() {
Cmd: []string{"cat", "/mnt/test-1/data"},
ExpectedString: "hello world\nhello world\n", // pod will be restarted, so expect to see the string twice
},
+ StorageClassParameters: map[string]string{"skuName": "Premium_LRS"},
}
test.Run(cs, ns)
})
@@ -117,8 +119,9 @@ var _ = ginkgo.Describe("[blobfuse-csi-e2e] Dynamic Provisioning", func() {
},
}
test := testsuites.DynamicallyProvisionedReadOnlyVolumeTest{
- CSIDriver: testDriver,
- Pods:      pods,
+ CSIDriver:              testDriver,
+ Pods:                   pods,
+ StorageClassParameters: map[string]string{"skuName": "Standard_GRS"},
}
test.Run(cs, ns)
})
@@ -153,9 +156,10 @@ var _ = ginkgo.Describe("[blobfuse-csi-e2e] Dynamic Provisioning", func() {
},
}
test := testsuites.DynamicallyProvisionedCollocatedPodTest{
- CSIDriver:    testDriver,
- Pods:         pods,
- ColocatePods: true,
+ CSIDriver:              testDriver,
+ Pods:                   pods,
+ ColocatePods:           true,
+ StorageClassParameters: map[string]string{"skuName": "Standard_RAGRS"},
}
test.Run(cs, ns)
})
@@ -170,8 +174,9 @@ var _ = ginkgo.Describe("[blobfuse-csi-e2e] Dynamic Provisioning", func() {
},
}
test := testsuites.DynamicallyProvisionedReclaimPolicyTest{
- CSIDriver: testDriver,
- Volumes:   volumes,
+ CSIDriver:              testDriver,
+ Volumes:                volumes,
+ StorageClassParameters: map[string]string{"skuName": "Standard_LRS"},
}
test.Run(cs, ns)
})
@@ -186,9 +191,10 @@ var _ = ginkgo.Describe("[blobfuse-csi-e2e] Dynamic Provisioning", func() {
},
}
test := testsuites.DynamicallyProvisionedReclaimPolicyTest{
- CSIDriver: testDriver,
- Volumes:   volumes,
- Blobfuse:  blobfuseDriver,
+ CSIDriver:              testDriver,
+ Volumes:                volumes,
+ Blobfuse:               blobfuseDriver,
+ StorageClassParameters: map[string]string{"skuName": "Standard_GRS"},
}
test.Run(cs, ns)
})
@@ -28,13 +28,14 @@ import (
// Waiting for the PV provisioner to create a new PV
// Testing if the Pod(s) Cmd is run with a 0 exit code
type DynamicallyProvisionedCmdVolumeTest struct {
- CSIDriver driver.DynamicPVTestDriver
- Pods      []PodDetails
+ CSIDriver              driver.DynamicPVTestDriver
+ Pods                   []PodDetails
+ StorageClassParameters map[string]string
}

func (t *DynamicallyProvisionedCmdVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
for _, pod := range t.Pods {
- tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver)
+ tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
// defer must be called here so the resources are not removed before they are used
for i := range cleanup {
defer cleanup[i]()
@@ -28,15 +28,16 @@ import (
// Waiting for the PV provisioner to create a new PV
// Testing if multiple Pod(s) can write simultaneously
type DynamicallyProvisionedCollocatedPodTest struct {
- CSIDriver    driver.DynamicPVTestDriver
- Pods         []PodDetails
- ColocatePods bool
+ CSIDriver              driver.DynamicPVTestDriver
+ Pods                   []PodDetails
+ ColocatePods           bool
+ StorageClassParameters map[string]string
}

func (t *DynamicallyProvisionedCollocatedPodTest) Run(client clientset.Interface, namespace *v1.Namespace) {
nodeName := ""
for _, pod := range t.Pods {
- tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver)
+ tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
if t.ColocatePods && nodeName != "" {
tpod.SetNodeSelector(map[string]string{"name": nodeName})
}
@@ -28,9 +28,10 @@ import (
// Testing if the Pod can write and read to mounted volumes
// Deleting a pod, and again testing if the Pod can write and read to mounted volumes
type DynamicallyProvisionedDeletePodTest struct {
- CSIDriver driver.DynamicPVTestDriver
- Pod       PodDetails
- PodCheck  *PodExecCheck
+ CSIDriver              driver.DynamicPVTestDriver
+ Pod                    PodDetails
+ PodCheck               *PodExecCheck
+ StorageClassParameters map[string]string
}

type PodExecCheck struct {
@@ -39,7 +40,7 @@ type PodExecCheck struct {
}

func (t *DynamicallyProvisionedDeletePodTest) Run(client clientset.Interface, namespace *v1.Namespace) {
- tDeployment, cleanup := t.Pod.SetupDeployment(client, namespace, t.CSIDriver)
+ tDeployment, cleanup := t.Pod.SetupDeployment(client, namespace, t.CSIDriver, t.StorageClassParameters)
// defer must be called here so the resources are not removed before they are used
for i := range cleanup {
defer cleanup[i]()
@@ -34,13 +34,14 @@ const expectedReadOnlyLog = "Read-only file system"
// Waiting for the PV provisioner to create a new PV
// Testing that the Pod(s) cannot write to the volume when mounted
type DynamicallyProvisionedReadOnlyVolumeTest struct {
- CSIDriver driver.DynamicPVTestDriver
- Pods      []PodDetails
+ CSIDriver              driver.DynamicPVTestDriver
+ Pods                   []PodDetails
+ StorageClassParameters map[string]string
}

func (t *DynamicallyProvisionedReadOnlyVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
for _, pod := range t.Pods {
- tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver)
+ tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
// defer must be called here so the resources are not removed before they are used
for i := range cleanup {
defer cleanup[i]()
@@ -27,14 +27,15 @@ import (
// DynamicallyProvisionedReclaimPolicyTest will provision required PV(s) and PVC(s)
// Testing the correct behavior for different reclaimPolicies
type DynamicallyProvisionedReclaimPolicyTest struct {
- CSIDriver driver.DynamicPVTestDriver
- Volumes   []VolumeDetails
- Blobfuse  *blobfuse.Driver
+ CSIDriver              driver.DynamicPVTestDriver
+ Volumes                []VolumeDetails
+ Blobfuse               *blobfuse.Driver
+ StorageClassParameters map[string]string
}

func (t *DynamicallyProvisionedReclaimPolicyTest) Run(client clientset.Interface, namespace *v1.Namespace) {
for _, volume := range t.Volumes {
- tpvc, _ := volume.SetupDynamicPersistentVolumeClaim(client, namespace, t.CSIDriver)
+ tpvc, _ := volume.SetupDynamicPersistentVolumeClaim(client, namespace, t.CSIDriver, t.StorageClassParameters)

// will delete the PVC
// will also wait for PV to be deleted when reclaimPolicy=Delete
12 changes: 6 additions & 6 deletions test/e2e/testsuites/specs.go
@@ -85,11 +85,11 @@ type DataSource struct {
Name string
}

- func (pod *PodDetails) SetupWithDynamicVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver) (*TestPod, []func()) {
+ func (pod *PodDetails) SetupWithDynamicVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func()) {
tpod := NewTestPod(client, namespace, pod.Cmd)
cleanupFuncs := make([]func(), 0)
for n, v := range pod.Volumes {
- tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(client, namespace, csiDriver)
+ tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(client, namespace, csiDriver, storageClassParameters)
cleanupFuncs = append(cleanupFuncs, funcs...)
if v.VolumeMode == Block {
tpod.SetupRawBlockVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeDevice.NameGenerate, n+1), v.VolumeDevice.DevicePath)
@@ -116,11 +116,11 @@ func (pod *PodDetails) SetupWithPreProvisionedVolumes(client clientset.Interface
return tpod, cleanupFuncs
}

- func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver) (*TestDeployment, []func()) {
+ func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestDeployment, []func()) {
cleanupFuncs := make([]func(), 0)
volume := pod.Volumes[0]
ginkgo.By("setting up the StorageClass")
- storageClass := csiDriver.GetDynamicProvisionStorageClass(driver.GetParameters(), volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name)
+ storageClass := csiDriver.GetDynamicProvisionStorageClass(storageClassParameters, volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name)
tsc := NewTestStorageClass(client, namespace, storageClass)
createdStorageClass := tsc.Create()
cleanupFuncs = append(cleanupFuncs, tsc.Cleanup)
@@ -137,10 +137,10 @@ func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1
return tDeployment, cleanupFuncs
}

- func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver) (*TestPersistentVolumeClaim, []func()) {
+ func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPersistentVolumeClaim, []func()) {
cleanupFuncs := make([]func(), 0)
ginkgo.By("setting up the StorageClass")
- storageClass := csiDriver.GetDynamicProvisionStorageClass(driver.GetParameters(), volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name)
+ storageClass := csiDriver.GetDynamicProvisionStorageClass(storageClassParameters, volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name)
tsc := NewTestStorageClass(client, namespace, storageClass)
createdStorageClass := tsc.Create()
cleanupFuncs = append(cleanupFuncs, tsc.Cleanup)
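End to end, the map declared on each test struct flows through `SetupWithDynamicVolumes` and `SetupDynamicPersistentVolumeClaim` into `GetDynamicProvisionStorageClass`, where it becomes the StorageClass `parameters` field. A hedged sketch of that last step using upstream Kubernetes API types (the generate-name, provisioner, and policy values mirror the examples above; this is not the repo's exact `getStorageClass` code):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildStorageClass approximates what the e2e helpers produce: the
// caller-supplied parameters become StorageClass.Parameters verbatim.
func buildStorageClass(parameters map[string]string) *storagev1.StorageClass {
	reclaimPolicy := v1.PersistentVolumeReclaimDelete
	bindingMode := storagev1.VolumeBindingImmediate
	return &storagev1.StorageClass{
		ObjectMeta:        metav1.ObjectMeta{GenerateName: "blobfuse.csi.azure.com-dynamic-sc-"},
		Provisioner:       "blobfuse.csi.azure.com",
		Parameters:        parameters, // e.g. {"skuName": "Premium_LRS"}
		ReclaimPolicy:     &reclaimPolicy,
		VolumeBindingMode: &bindingMode,
	}
}

func main() {
	sc := buildStorageClass(map[string]string{"skuName": "Premium_LRS"})
	fmt.Println(sc.Provisioner, sc.Parameters)
}
```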
