Overhauled pv e2e test to reflect common lifecycle #27133

Merged
142 changes: 100 additions & 42 deletions test/e2e/persistent_volumes.go
@@ -17,10 +17,10 @@ limitations under the License.
package e2e

import (
"encoding/json"
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
@@ -38,23 +38,32 @@ func persistentVolumeTestCleanup(client *client.Client, config VolumeTestConfig)
if config.serverImage != "" {
err := podClient.Delete(config.prefix+"-server", nil)
if err != nil {
framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
framework.Failf("Failed to delete the server pod: %v", err)
}
}
}

func deletePersistentVolume(c *client.Client, pv *api.PersistentVolume) {
// Delete the PersistentVolume
framework.Logf("Deleting PersistentVolume")
err := c.PersistentVolumes().Delete(pv.Name)
if err != nil {
framework.Failf("Delete PersistentVolume failed: %v", err)
}
// Wait for PersistentVolume to Delete
framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 30*time.Second)
}
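
The wait call above discards any error returned. A minimal sketch of a stricter variant, assuming WaitForPersistentVolumeDeleted returns an error (its signature is not shown in this diff):

// Hedged sketch: fail the test if the PV is not actually gone within the timeout.
if err := framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 30*time.Second); err != nil {
framework.Failf("PersistentVolume %v was not deleted within the timeout: %v", pv.Name, err)
}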

var _ = framework.KubeDescribe("PersistentVolumes", func() {
f := framework.NewDefaultFramework("pv")
var c *client.Client
var ns string

BeforeEach(func() {
c = f.Client
ns = f.Namespace.Name
})

// Flaky issue: #25294
It("NFS volume can be created, bound, retrieved, unbound, and used by a pod [Flaky]", func() {
It("should create a PersistentVolume, Claim, and a client Pod that will test the read/write access of the volume[Flaky]", func() {
config := VolumeTestConfig{
namespace: ns,
prefix: "nfs",
@@ -66,58 +75,102 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
persistentVolumeTestCleanup(c, config)
}()

// Create the nfs server pod
pod := startVolumeServer(c, config)
serverIP := pod.Status.PodIP
framework.Logf("NFS server IP address: %v", serverIP)

// Define the PersistentVolume and PersistentVolumeClaim
pv := makePersistentVolume(serverIP)
pvc := makePersistentVolumeClaim(ns)

framework.Logf("Creating PersistentVolume using NFS")
// Create the PersistentVolume and wait for PersistentVolume.Status.Phase to be Available
// Defer deletion to clean up the PV should the test fail post-creation.
framework.Logf("Creating PersistentVolume")
pv, err := c.PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred())
if err != nil {
framework.Failf("Create PersistentVolume failed: %v", err)
}
defer deletePersistentVolume(c, pv)
framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 1*time.Second, 20*time.Second)

// Create the PersistentVolumeClaim and wait for Bound phase
framework.Logf("Creating PersistentVolumeClaim")
pvc, err = c.PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred())

// allow the binder a chance to catch up. should not be more than 20s.
framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 1*time.Second, 30*time.Second)
if err != nil {
framework.Failf("Create PersistentVolumeClaim failed: %v", err)
}
framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)

// Wait for PersistentVolume.Status.Phase to be Bound. Can take several minutes.
err = framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
if err != nil {
framework.Failf("PersistentVolume failed to enter a bound state: %+v", err)
}
// Check that PersistentVolume.ClaimRef is non-nil and its UID is non-empty as confirmation of the bound state.
framework.Logf("Checking PersistentVolume ClaimRef is non-nil")
pv, err = c.PersistentVolumes().Get(pv.Name)
Expect(err).NotTo(HaveOccurred())
if pv.Spec.ClaimRef == nil {
framework.Failf("Expected PersistentVolume to be bound, but got nil ClaimRef: %+v", pv)
if pv.Spec.ClaimRef == nil || len(pv.Spec.ClaimRef.UID) == 0 {
pvJson, _ := json.MarshalIndent(pv, "", " ")
framework.Failf("Expected PersistentVolume to be bound, but got nil ClaimRef or UID: %+v", string(pvJson))
}

// Check the PersistentVolumeClaim.Status.Phase for Bound state
framework.Logf("Checking PersistentVolumeClaim status is Bound")
pvc, err = c.PersistentVolumeClaims(ns).Get(pvc.Name)

Member:

During binding, the controller saves the bound PV first and the bound PVC after that. They are not written in the same etcd transaction, so it can happen that the test gets CPU time once the PV is saved and the WaitForPersistentVolumePhase call a few lines above returns, but the PVC is not saved yet.

Please add WaitForPersistentVolumeClaimPhase here to make sure the controller had time to save both PVC and PV.
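
A minimal sketch of the suggested guard, reusing the claim-phase helper and timeouts already used earlier in this diff (whether the helper returns an error is an assumption, not shown here):

// Hedged sketch: also wait for the PVC phase before reading the claim back, so the
// controller has had time to persist both the PV and the PVC.
err = framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
if err != nil {
framework.Failf("PersistentVolumeClaim failed to enter a bound state: %+v", err)
}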

if pvcPhase := pvc.Status.Phase; pvcPhase != api.ClaimBound {
framework.Failf("Expected PersistentVolumeClaim status Bound. Actual: %+v. Error: %+v", pvcPhase, err)
}

// Check that the PersistentVolume's ClaimRef contains the UID of the PersistentVolumeClaim
if pvc.ObjectMeta.UID != pv.Spec.ClaimRef.UID {
framework.Failf("Binding failed: PersistentVolumeClaim UID does not match PersistentVolume's ClaimRef UID.")
}

// writePod writes to the nfs volume
framework.Logf("Creating writePod")
pvc, _ = c.PersistentVolumeClaims(ns).Get(pvc.Name)
writePod := makeWritePod(ns, pvc.Name)
writePod, err = c.Pods(ns).Create(writePod)
if err != nil {
framework.Failf("Create writePod failed: %+v", err)
}

// Wait for the writePod to complete its lifecycle
err = framework.WaitForPodSuccessInNamespace(c, writePod.Name, writePod.Spec.Containers[0].Name, writePod.Namespace)
if err != nil {
framework.Failf("WritePod exited with error: %+v", err)
} else {
framework.Logf("WritePod exited without error.")
}

// Delete the PersistentVolumeClaim
framework.Logf("Deleting PersistentVolumeClaim to trigger PV Recycling")
err = c.PersistentVolumeClaims(ns).Delete(pvc.Name)
Expect(err).NotTo(HaveOccurred())
if err != nil {
framework.Failf("Delete PersistentVolumeClaim failed: %v", err)
}

// allow the recycler a chance to catch up. it has to perform NFS scrub, which can be slow in e2e.
framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 5*time.Second, 300*time.Second)
// Wait for the PersistentVolume phase to return to Available
framework.Logf("Waiting for recycling process to complete.")
err = framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 3*time.Second, 300*time.Second)

Contributor:

5m may be sufficient time for the Phase change... Are you certain the expected Phase is "Available" vs. "Released"? In my test env I get "Released", but that may be a problem with my env...

Contributor (Author):

That's a good question. From what I've found, the reclaimPolicy isn't specified. In the test, it does become Available. My guess is that the default policy is Recycle, but I haven't confirmed that. Per test output, the PV enters Released then Available, so testing for either works right now.

Contributor:

Ya, my problem is probably more related to the fact that the pv's claimRef isn't nil'd out, so maybe that causes the Phase to change to Released? Since the original test tested for Available and your test works, I'd leave it as is.
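
If the ambiguity matters, a minimal sketch of pinning the policy explicitly in makePersistentVolume; the constant name and the Capacity/AccessModes values are assumptions, since those lines are collapsed in this diff:

// Hedged sketch of an explicit Recycle policy, making the Released -> Available
// transition deliberate rather than a default.
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{GenerateName: "nfs-"},
Spec: api.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle, // assumed constant name
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("2Gi"), // size assumed
},
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, // assumed
PersistentVolumeSource: api.PersistentVolumeSource{
NFS: &api.NFSVolumeSource{Server: serverIP, Path: "/exports", ReadOnly: false},
},
},
}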

if err != nil {
framework.Failf("Recycling failed: %v", err)
}

// Examine the PersistentVolume.ClaimRef and UID. Expect a nil ClaimRef or an empty UID.
pv, err = c.PersistentVolumes().Get(pv.Name)
Expect(err).NotTo(HaveOccurred())
if pv.Spec.ClaimRef != nil {
framework.Failf("Expected PersistentVolume to be unbound, but found non-nil ClaimRef: %+v", pv)
if pv.Spec.ClaimRef != nil && len(pv.Spec.ClaimRef.UID) > 0 {
crjson, _ := json.MarshalIndent(pv.Spec.ClaimRef, "", " ")
framework.Failf("Expected a nil ClaimRef or UID. Found: ", string(crjson))
}

// The NFS Server pod we're using contains an index.html file
// Verify the file was really scrubbed from the volume
podTemplate := makeCheckPod(ns, serverIP)
checkpod, err := c.Pods(ns).Create(podTemplate)
framework.ExpectNoError(err, "Failed to create checker pod: %v", err)
err = framework.WaitForPodSuccessInNamespace(c, checkpod.Name, checkpod.Spec.Containers[0].Name, checkpod.Namespace)
Expect(err).NotTo(HaveOccurred())
// must delete the PV, otherwise it stays Available and a future PVC may bind to it, causing a newly created PV to fail to bind
framework.Logf("Deleting PersistentVolume")
err = c.PersistentVolumes().Delete(pv.Name)
Expect(err).NotTo(HaveOccurred())
})
})

func makePersistentVolume(serverIP string) *api.PersistentVolume {
// Takes an NFS server IP address and returns a PersistentVolume object for instantiation.
// Specs are expected to match this test's PersistentVolumeClaim
return &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
GenerateName: "nfs-",
@@ -130,7 +183,7 @@ func makePersistentVolume(serverIP string) *api.PersistentVolume {
PersistentVolumeSource: api.PersistentVolumeSource{
NFS: &api.NFSVolumeSource{
Server: serverIP,
Path: "/",
Path: "/exports",
ReadOnly: false,
},
},
@@ -144,6 +197,8 @@
}

func makePersistentVolumeClaim(ns string) *api.PersistentVolumeClaim {
// Takes a namespace and returns a PersistentVolumeClaim object for instantiation.
// Specs are expected to match this test's PersistentVolume
return &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
GenerateName: "pvc-",
@@ -164,45 +219,48 @@
}
}

func makeCheckPod(ns string, nfsserver string) *api.Pod {
func makeWritePod(ns string, pvcName string) *api.Pod {
// Prepare a pod that mounts the NFS volume via the PersistentVolumeClaim
// and writes /mnt/SUCCESS to verify write access to the volume

var isPrivileged bool = true
return &api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Default.GroupVersion().String(),
},
ObjectMeta: api.ObjectMeta{
GenerateName: "checker-",
GenerateName: "write-pod-",
Namespace: ns,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "scrub-checker",
Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", "test ! -e /mnt/index.html || exit 1"},
Args: []string{"-c", "touch /mnt/SUCCESS && exit 0 || exit 1"},
VolumeMounts: []api.VolumeMount{
{
Name: "nfs-volume",
Name: "nfs-pvc",
MountPath: "/mnt",
},
},
SecurityContext: &api.SecurityContext{
Privileged: &isPrivileged,

Contributor:

I assume the pod needs to be priv to write to the mount. However, in general, we don't want to encourage priv pods, so maybe you can change the perms on the mount? The nfs-server pod creates the export on /:

#nfs-server pod:
sh-4.2# cat /etc/exports               
/exports *(rw,fsid=0,insecure,no_root_squash)
/ *(rw,fsid=0,insecure,no_root_squash)

and the nfs pv created above sets the vol's path to "/". Perms on the nfs-server's "/" are:

drwxr-xr-x. 18 root root 4096 Jun  9 20:26 /

so non-owners (root owns it) can only read, not write.
This is mostly rambling and I can't provide a better solution than priv until I am able to get the checker pod running in my env...
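
A minimal sketch of the non-privileged variant hinted at above; it assumes the NFS export is writable by the pod's user (e.g. a world-writable export directory on the server), which is not verified here:

// Hedged sketch: the same write container without a SecurityContext. Works only if
// the export's permissions allow a non-privileged writer.
container := api.Container{
Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", "touch /mnt/SUCCESS && exit 0 || exit 1"},
VolumeMounts: []api.VolumeMount{{Name: "nfs-pvc", MountPath: "/mnt"}},
}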

},
},
},
Volumes: []api.Volume{
{
Name: "nfs-volume",
Name: "nfs-pvc",
VolumeSource: api.VolumeSource{
NFS: &api.NFSVolumeSource{
Server: nfsserver,
Path: "/",
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
},
},
},
},
},
}

}