Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

adding test for volume fstype validation #44565

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
1 change: 1 addition & 0 deletions test/e2e/storage/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ go_library(
"volumes.go",
"vsphere_utils.go",
"vsphere_volume_diskformat.go",
"vsphere_volume_fstype.go",
"vsphere_volume_ops_storm.go",
"vsphere_volume_placement.go",
],
Expand Down
2 changes: 1 addition & 1 deletion test/e2e/storage/persistent_volumes-vsphere.go
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
*/
framework.AddCleanupAction(func() {
if len(volumePath) > 0 {
waitForVSphereDiskToDetach(vsp, volumePath, node)
framework.ExpectNoError(waitForVSphereDiskToDetach(vsp, volumePath, node))
vsp.DeleteVolume(volumePath)
}
})
Expand Down
14 changes: 9 additions & 5 deletions test/e2e/storage/vsphere_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ func verifyVSphereDiskAttached(vsp *vsphere.VSphere, volumePath string, nodeName
}

// Wait until vsphere vmdk is detached from the given node or time out after 5 minutes
func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) {
func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
var (
err error
diskAttached = true
Expand All @@ -62,7 +62,9 @@ func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeNam
)
if vsp == nil {
vsp, err = vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
if err != nil {
return err
}
}
err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
diskAttached, err = verifyVSphereDiskAttached(vsp, volumePath, nodeName)
Expand All @@ -77,11 +79,13 @@ func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeNam
framework.Logf("Waiting for Volume %q to detach from %q.", volumePath, nodeName)
return false, nil
})
Expect(err).NotTo(HaveOccurred())
if err != nil {
return err
}
if diskAttached {
Expect(fmt.Errorf("Gave up waiting for Volume %q to detach from %q after %v", volumePath, nodeName, detachTimeout)).NotTo(HaveOccurred())
return fmt.Errorf("Gave up waiting for Volume %q to detach from %q after %v", volumePath, nodeName, detachTimeout)
}

return nil
}

// function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
Expand Down
146 changes: 146 additions & 0 deletions test/e2e/storage/vsphere_volume_fstype.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stype "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api/v1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)

/*
Test to verify fstype specified in storage-class is being honored after volume creation.

Steps
1. Create StorageClass with fstype set to valid type (default case included).
2. Create PVC which uses the StorageClass created in step 1.
3. Wait for PV to be provisioned.
4. Wait for PVC's status to become Bound.
5. Create pod using PVC on specific node.
6. Wait for Disk to be attached to the node.
7. Execute command in the pod to get fstype.
8. Delete pod and Wait for Volume Disk to be detached from the Node.
9. Delete PVC, PV and Storage Class.
*/

// Spec verifying that the "fstype" StorageClass parameter is honored by the
// vSphere provisioner. Each It delegates the heavy lifting to
// invokeTestForFstype and records the created objects so AfterEach can
// clean them up even when the spec fails midway.
var _ = framework.KubeDescribe("vsphere Volume fstype [Volume]", func() {
	f := framework.NewDefaultFramework("volume-fstype")

	// Shared per-spec state, populated in BeforeEach / the Its and
	// released in AfterEach.
	var (
		client       clientset.Interface
		namespace    string
		storageclass *storage.StorageClass
		pvclaim      *v1.PersistentVolumeClaim
	)

	BeforeEach(func() {
		framework.SkipUnlessProviderIs("vsphere")
		client = f.ClientSet
		namespace = f.Namespace.Name
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		Expect(len(nodes.Items)).NotTo(BeZero(), "Unable to find ready and schedulable Node")
	})

	AfterEach(func() {
		// Issue both deletions first, then assert, so a StorageClass
		// delete failure does not leave the claim behind.
		var scErr, pvcErr error
		if storageclass != nil {
			scErr = client.StorageV1beta1().StorageClasses().Delete(storageclass.Name, nil)
		}
		if pvclaim != nil {
			pvcErr = client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaim.Name, nil)
		}
		framework.ExpectNoError(scErr)
		framework.ExpectNoError(pvcErr)
		storageclass = nil
		pvclaim = nil
	})

	It("verify fstype - ext3 formatted volume", func() {
		By("Invoking Test for fstype: ext3")
		storageclass, pvclaim = invokeTestForFstype(f, client, namespace, "ext3", "ext3")
	})

	It("verify disk format type - default value should be ext4", func() {
		By("Invoking Test for fstype: Default Value")
		storageclass, pvclaim = invokeTestForFstype(f, client, namespace, "", "ext4")
	})
})

func invokeTestForFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string, expectedContent string) (*storage.StorageClass, *v1.PersistentVolumeClaim) {

framework.Logf("Invoking Test for fstype: %s", fstype)
scParameters := make(map[string]string)
scParameters["fstype"] = fstype

By("Creating Storage Class With Fstype")
storageClassSpec := getVSphereStorageClassSpec("fstype", scParameters)
storageclass, err := client.StorageV1beta1().StorageClasses().Create(storageClassSpec)
Expect(err).NotTo(HaveOccurred())

By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec)
Expect(err).NotTo(HaveOccurred())

By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())

// Get new copy of the claim
pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())

// Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())

By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node
podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nil, "/bin/df -T /mnt/test | /bin/awk 'FNR == 2 {print $2}' > /mnt/test/fstype && while true ; do sleep 2 ; done")
pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
Copy link
Contributor

@jeffvance jeffvance Apr 17, 2017

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is it guaranteed that df will always be in /bin in all test environments? For example in my RHEL vm:

# which df
/usr/bin/df

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It is not guaranteed, currently our test environment is using photon vm, we are planning to run these tests on Ubuntu. Can I fix all command paths in separate PR?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yes.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Actually, /bin/df is safer choice as it is default on distros without systemd, e.g. on Debian wheezy that runs on GCE machines with e2e test. On the other hand, /usr/bin/awk should be used.

On any distro with systemd /bin and /usr/bin are the same directories.

Expect(err).NotTo(HaveOccurred())

By("Waiting for pod to be running")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())

pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())

// Asserts: Right disk is attached to the pod
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
isAttached, err := verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk is not attached with the node")

_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/test/fstype"}, expectedContent, time.Minute)
Expect(err).NotTo(HaveOccurred())

var volumePaths []string
volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)

By("Delete pod and wait for volume to be detached from node")
deletePodAndWaitForVolumeToDetach(f, client, pod, vsp, pod.Spec.NodeName, volumePaths)
Copy link
Contributor

@jeffvance jeffvance Apr 17, 2017

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have a pr that attempts to improve helper functions by making sure these funcs always return error (or []error). The reason is so that helper funcs that do multiple tasks (eg. multiple Create's) return an error. Helper/util funcs are invoked by several callers, and the caller should ultimately decide how to handle errors.
deletePodAndWaitForVolumeToDetach calls waitForVSphereDiskToDetach and both seems like they should return error (especially the later).

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

handled all error in deletePodAndWaitForVolumeToDetach() using framework.ExpectNoError(

updated waitForVSphereDiskToDetach to return error.


return storageclass, pvclaim
}
4 changes: 2 additions & 2 deletions test/e2e/storage/vsphere_volume_placement.go
Original file line number Diff line number Diff line change
Expand Up @@ -281,7 +281,7 @@ var _ = framework.KubeDescribe("Volume Placement [Volume]", func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name)
By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
for _, volumePath := range volumePaths {
waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(node1Name))
framework.ExpectNoError(waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(node1Name)))
}
}()

Expand Down Expand Up @@ -381,6 +381,6 @@ func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Inter

By("Waiting for volume to be detached from the node")
for _, volumePath := range volumePaths {
waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(nodeName))
framework.ExpectNoError(waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(nodeName)))
}
}