e2e test for dynamic provisioning. #21140

Merged: 1 commit, merged on Mar 15, 2016
61 changes: 57 additions & 4 deletions test/e2e/util.go
@@ -103,7 +103,7 @@ const (
	// String used to mark pod deletion
	nonExist = "NonExist"

-	// How often to poll pods and nodes.
+	// How often to poll pods, nodes and claims.
	poll = 2 * time.Second

	// service accounts are provisioned after namespace creation
@@ -127,6 +127,9 @@ const (
	podRespondingTimeout     = 2 * time.Minute
	serviceRespondingTimeout = 2 * time.Minute
	endpointRegisterTimeout  = time.Minute

	// How long claims have to become dynamically provisioned
	claimProvisionTimeout = 5 * time.Minute
)

// SubResource proxy should have been functional in v1.0.0, but SubResource
@@ -774,6 +777,46 @@ func waitForPersistentVolumePhase(phase api.PersistentVolumePhase, c *client.Client
	return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}

// waitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func waitForPersistentVolumeDeleted(c *client.Client, pvName string, poll, timeout time.Duration) error {
	Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
		pv, err := c.PersistentVolumes().Get(pvName)
		if err == nil {
			Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
			continue
		}
		if apierrs.IsNotFound(err) {
			Logf("PersistentVolume %s was removed", pvName)
			return nil
		}
		Logf("Get persistent volume %s failed, ignoring for %v: %v", pvName, poll, err)
	}
	return fmt.Errorf("PersistentVolume %s still exists after %v", pvName, timeout)
}

// waitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func waitForPersistentVolumeClaimPhase(phase api.PersistentVolumeClaimPhase, c *client.Client, ns string, pvcName string, poll, timeout time.Duration) error {
	Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
		pvc, err := c.PersistentVolumeClaims(ns).Get(pvcName)
		if err != nil {
			Logf("Get persistent volume claim %s failed, ignoring for %v: %v", pvcName, poll, err)
			continue
		}
		if pvc.Status.Phase == phase {
			Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
			return nil
		}
		Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
	}
	return fmt.Errorf("PersistentVolumeClaim %s not in phase %s within %v", pvcName, phase, timeout)
}

// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
// Please see NewFramework instead of using this directly.
func CreateTestingNS(baseName string, c *client.Client, labels map[string]string) (*api.Namespace, error) {
@@ -976,9 +1019,9 @@ func waitForPodTerminatedInNamespace(c *client.Client, podName, reason, namespace
})
}

-// waitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or ran too long.
-func waitForPodSuccessInNamespace(c *client.Client, podName string, contName string, namespace string) error {
-	return waitForPodCondition(c, namespace, podName, "success or failure", podStartTimeout, func(pod *api.Pod) (bool, error) {
+// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
+func waitForPodSuccessInNamespaceTimeout(c *client.Client, podName string, contName string, namespace string, timeout time.Duration) error {
+	return waitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *api.Pod) (bool, error) {
		// Cannot use pod.Status.Phase == api.PodSucceeded/api.PodFailed due to #2632
		ci, ok := api.GetContainerStatus(pod.Status.ContainerStatuses, contName)
		if !ok {
@@ -997,6 +1040,16 @@ func waitForPodSuccessInNamespace(c *client.Client, podName string, contName string
	})
}

// waitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or did not succeed within podStartTimeout.
func waitForPodSuccessInNamespace(c *client.Client, podName string, contName string, namespace string) error {
	return waitForPodSuccessInNamespaceTimeout(c, podName, contName, namespace, podStartTimeout)
}

// waitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or did not succeed within slowPodStartTimeout.
func waitForPodSuccessInNamespaceSlow(c *client.Client, podName string, contName string, namespace string) error {
	return waitForPodSuccessInNamespaceTimeout(c, podName, contName, namespace, slowPodStartTimeout)
}

// waitForRCPodOnNode returns the pod from the given replication controller (described by rcName) which is scheduled on the given node.
// In case of failure or too long waiting time, an error is returned.
func waitForRCPodOnNode(c *client.Client, ns, rcName, node string) (*api.Pod, error) {
193 changes: 193 additions & 0 deletions test/e2e/volume_provisioning.go
@@ -0,0 +1,193 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/unversioned"
	client "k8s.io/kubernetes/pkg/client/unversioned"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

const (
	// Requested size of the volume
	requestedSize = "1500Mi"
	// Expected size of the volume is 2GiB, because all three supported cloud
	// providers allocate volumes in 1GiB chunks.
	expectedSize = "2Gi"
)
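// A back-of-the-envelope check (a sketch, not part of this PR) of the 1GiB
// rounding that the expectedSize comment above relies on; `gib` and `chunks`
// are illustrative names, not identifiers from this test:
//
//	requested := resource.MustParse(requestedSize) // 1500Mi = 1572864000 bytes
//	gib := int64(1024 * 1024 * 1024)               // one 1GiB allocation chunk
//	chunks := (requested.Value() + gib - 1) / gib  // rounds 1500Mi up to 2
//	// a provisioner allocating whole GiB chunks therefore returns a 2Gi volume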

var _ = Describe("Dynamic provisioning", func() {
	framework := NewDefaultFramework("volume-provisioning")

	// filled in BeforeEach
	var c *client.Client
	var ns string

	BeforeEach(func() {
		c = framework.Client
		ns = framework.Namespace.Name
	})

Describe("DynamicProvisioner", func() {
It("should create and delete persistent volumes", func() {
SkipUnlessProviderIs("openstack", "gce", "aws")
By("creating a claim with a dynamic provisioning annotation")
claim := createClaim(ns)
defer func() {
c.PersistentVolumeClaims(ns).Delete(claim.Name)
}()
claim, err := c.PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())

err = waitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, claim.Name, poll, claimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())

By("checking the claim")
// Get new copy of the claim
claim, err = c.PersistentVolumeClaims(ns).Get(claim.Name)
Expect(err).NotTo(HaveOccurred())

// Get the bound PV
pv, err := c.PersistentVolumes().Get(claim.Spec.VolumeName)
Expect(err).NotTo(HaveOccurred())

// Check sizes
expectedCapacity := resource.MustParse(expectedSize)
pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))

requestedCapacity := resource.MustParse(requestedSize)
claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))

// Check PV properties
Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))

// We start two pods:
// - The first writes 'hello word' to the /mnt/test (= the volume).
// - The second one runs grep 'hello world' on /mnt/test.
// If both suceed, Kubernetes actually allocated something that is
// persistent across pods.
By("checking the created volume is writable")
runInPodWithVolume(c, ns, claim.Name, "echo 'hello world' > /mnt/test/data")

By("checking the created volume is readable and retains data")
runInPodWithVolume(c, ns, claim.Name, "grep 'hello world' /mnt/test/data")

// Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
> Review comment (Member): Not for this PR. But this reminds me that we need
> to think through the logic that triggers deletion of auto-provisioned
> volumes. Opened up #22975

			// probably collide with destruction of the pods above - the pods
			// still have the volume attached (kubelet is slow...) and deletion
			// of an attached volume is not allowed by AWS/GCE/OpenStack.
			// Kubernetes *will* retry the deletion several times within
			// pvclaimbinder-sync-period.
			// So, technically, this sleep is not needed. On the other hand,
			// the sync period is 10 minutes and we really don't want to wait
			// 10 minutes here. There is no way to see if kubelet is
			// finished with cleaning volumes. A small sleep here actually
			// speeds up the test!
			// One minute should be enough to clean up the pods properly.
			// Detaching e.g. a Cinder volume takes some time.
			By("Sleeping to let kubelet destroy all pods")
			time.Sleep(time.Minute)

			By("deleting the claim")
			expectNoError(c.PersistentVolumeClaims(ns).Delete(claim.Name))

			// Wait for the PV to get deleted too.
			expectNoError(waitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 10*time.Minute))
		})
	})
})

func createClaim(ns string) *api.PersistentVolumeClaim {
	return &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    ns,
			Annotations: map[string]string{
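				// This alpha annotation is the "dynamic provisioning
				// annotation" the test above refers to; its presence marks
				// the claim to be provisioned dynamically.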
"volume.alpha.kubernetes.io/storage-class": "",
},
},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(requestedSize),
},
},
},
}
}

// runInPodWithVolume runs a command in a pod with the given claim mounted to the /mnt/test directory.
func runInPodWithVolume(c *client.Client, ns, claimName, command string) {
	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: api.ObjectMeta{
			GenerateName: "pvc-volume-tester-",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "volume-tester",
					Image:   "gcr.io/google_containers/busybox",
					Command: []string{"/bin/sh"},
					Args:    []string{"-c", command},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      "my-volume",
							MountPath: "/mnt/test",
						},
					},
				},
			},
			RestartPolicy: api.RestartPolicyNever,
			Volumes: []api.Volume{
				{
					Name: "my-volume",
					VolumeSource: api.VolumeSource{
						PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
							ClaimName: claimName,
							ReadOnly:  false,
						},
					},
				},
			},
		},
	}
	pod, err := c.Pods(ns).Create(pod)
	// Check the Create error before registering the deferred Delete, so the
	// cleanup never dereferences a nil pod when creation fails.
	expectNoError(err, "Failed to create pod: %v", err)
	defer func() {
		expectNoError(c.Pods(ns).Delete(pod.Name, nil))
	}()
	expectNoError(waitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Spec.Containers[0].Name, pod.Namespace))
}