Move pvutil.go from e2e package to framework package #41373

Merged (2 commits) on Feb 23, 2017
1 change: 0 additions & 1 deletion test/e2e/BUILD
@@ -77,7 +77,6 @@ go_library(
"proxy.go",
"pv_reclaimpolicy.go",
"pvc_label_selector.go",
"pvutil.go",
"rc.go",
"reboot.go",
"replica_set.go",
3 changes: 3 additions & 0 deletions test/e2e/framework/BUILD
@@ -25,6 +25,7 @@ go_library(
"nodes_util.go",
"perf_util.go",
"pods.go",
"pv_util.go",
"resource_usage_gatherer.go",
"service_util.go",
"test_context.go",
@@ -68,6 +69,7 @@ go_library(
"//pkg/util/labels:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//test/e2e/perftype:go_default_library",
@@ -86,6 +88,7 @@ go_library(
"//vendor:google.golang.org/api/compute/v1",
"//vendor:google.golang.org/api/googleapi",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/api/resource",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1/unstructured",
"//vendor:k8s.io/apimachinery/pkg/fields",
172 changes: 79 additions & 93 deletions test/e2e/pvutil.go → test/e2e/framework/pv_util.go

Large diffs are not rendered by default.
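The moved file's own diff is collapsed, but the call sites updated elsewhere in this PR show what it now exports. A minimal usage sketch follows, mirroring those call sites; the signatures and field types are inferred, not taken from the rendered diff, and `serverIP`, `c`, `ns`, and `f` stand in for the usual test variables:

// Sketch only: mirrors the framework.* calls made in the files below.
pvConfig := framework.PersistentVolumeConfig{
	NamePrefix: "nfs-",
	PVSource: v1.PersistentVolumeSource{
		NFS: &v1.NFSVolumeSource{
			Server: serverIP, // assumed variable holding the NFS server pod IP
			Path:   "/exports",
		},
	},
	Prebind: nil,
}
pv, pvc := framework.CreatePVPVC(c, pvConfig, ns, false)
framework.WaitOnPVandPVC(c, ns, pv, pvc)
clientPod := framework.CreateClientPod(c, ns, pvc)
// ... exercise the volume from clientPod ...
framework.DeletePodWithWait(f, c, clientPod)
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
framework.DeletePersistentVolume(c, pv.Name)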

20 changes: 14 additions & 6 deletions test/e2e/framework/util.go
@@ -4590,6 +4590,14 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
return string(logs), err
}

func GetGCECloud() (*gcecloud.GCECloud, error) {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
}
return gceCloud, nil
}

// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
@@ -4600,9 +4608,9 @@ func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
}

func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
project := TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone)
@@ -5090,9 +5098,9 @@ func (p *E2ETestNodePreparer) CleanupNodes() error {
// the given name. The name is usually the UUID of the Service prefixed with an
// alpha-numeric character ('a') to work around cloudprovider rules.
func CleanupGCEResources(loadBalancerName string) (retErr error) {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
if err := gceCloud.DeleteFirewall(loadBalancerName); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
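Exporting GetGCECloud lets the GCECloud type assertion live in one place; callers outside the framework package, such as pd.go below, follow this pattern (sketch, mirroring the call sites in this PR):

gceCloud, err := framework.GetGCECloud()
if err != nil {
	return err
}
// use the returned *gcecloud.GCECloud for disk and firewall operations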
8 changes: 4 additions & 4 deletions test/e2e/kubelet.go
@@ -415,8 +415,8 @@ var _ = framework.KubeDescribe("kubelet", func() {
})

AfterEach(func() {
deletePodWithWait(f, c, pod)
deletePodWithWait(f, c, nfsServerPod)
framework.DeletePodWithWait(f, c, pod)
framework.DeletePodWithWait(f, c, nfsServerPod)
})

// execute It blocks from above table of tests
@@ -427,11 +427,11 @@ var _ = framework.KubeDescribe("kubelet", func() {
pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd)

By("Delete the NFS server pod")
deletePodWithWait(f, c, nfsServerPod)
framework.DeletePodWithWait(f, c, nfsServerPod)
nfsServerPod = nil

By("Delete the pod mounted to the NFS volume")
deletePodWithWait(f, c, pod)
framework.DeletePodWithWait(f, c, pod)
// pod object is now stale, but is intentionally not nil

By("Check if host running deleted pod has been cleaned up -- expect not")
18 changes: 4 additions & 14 deletions test/e2e/pd.go
@@ -589,7 +589,7 @@ func createPD() (string, error) {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(uuid.NewUUID()))

gceCloud, err := getGCECloud()
gceCloud, err := framework.GetGCECloud()
if err != nil {
return "", err
}
@@ -624,7 +624,7 @@

func deletePD(pdName string) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
gceCloud, err := getGCECloud()
gceCloud, err := framework.GetGCECloud()
if err != nil {
return err
}
@@ -663,7 +663,7 @@ func deletePD(pdName string) error {

func detachPD(nodeName types.NodeName, pdName string) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
gceCloud, err := getGCECloud()
gceCloud, err := framework.GetGCECloud()
if err != nil {
return err
}
@@ -771,7 +771,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
func waitForPDDetach(diskName string, nodeName types.NodeName) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)
gceCloud, err := getGCECloud()
gceCloud, err := framework.GetGCECloud()
if err != nil {
return err
}
@@ -798,16 +798,6 @@ func waitForPDDetach(diskName string, nodeName types.NodeName) error {
return nil
}

func getGCECloud() (*gcecloud.GCECloud, error) {
gceCloud, ok := framework.TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)

if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", framework.TestContext.CloudConfig.Provider)
}

return gceCloud, nil
}

func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
for _, host := range hosts {
framework.Logf("Detaching GCE PD %q from node %q.", diskName, host)
24 changes: 12 additions & 12 deletions test/e2e/persistent_volumes-disruptive.go
@@ -54,7 +54,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Disruptive][Flaky]",
c clientset.Interface
ns string
nfsServerPod *v1.Pod
nfsPVconfig persistentVolumeConfig
nfsPVconfig framework.PersistentVolumeConfig
nfsServerIP, clientNodeIP string
clientNode *v1.Node
)
@@ -72,9 +72,9 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Disruptive][Flaky]",
framework.Logf("[BeforeEach] Configuring PersistentVolume")
nfsServerIP = nfsServerPod.Status.PodIP
Expect(nfsServerIP).NotTo(BeEmpty())
nfsPVconfig = persistentVolumeConfig{
namePrefix: "nfs-",
pvSource: v1.PersistentVolumeSource{
nfsPVconfig = framework.PersistentVolumeConfig{
NamePrefix: "nfs-",
PVSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
Server: nfsServerIP,
Path: "/exports",
@@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Disruptive][Flaky]",
})

AfterEach(func() {
deletePodWithWait(f, c, nfsServerPod)
framework.DeletePodWithWait(f, c, nfsServerPod)
})

Context("when kubelet restarts", func() {
@@ -175,7 +175,7 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew

By("Restarting the kubelet.")
kubeletCommand(kStop, c, clientPod)
deletePodWithWait(f, c, clientPod)
framework.DeletePodWithWait(f, c, clientPod)
kubeletCommand(kStart, c, clientPod)

By("Expecting the volume mount not to be found.")
@@ -187,9 +187,9 @@
}

// initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed by the test
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig persistentVolumeConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pv, pvc := createPVPVC(c, pvConfig, ns, false)
pod := makePod(ns, pvc.Name)
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framework.PersistentVolumeConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pv, pvc := framework.CreatePVPVC(c, pvConfig, ns, false)
pod := framework.MakePod(ns, pvc.Name)
pod.Spec.NodeName = nodeName
framework.Logf("Creating nfs client Pod %s on node %s", pod.Name, nodeName)
pod, err := c.Core().Pods(ns).Create(pod)
@@ -208,9 +208,9 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig persis

// tearDownTestCase destroy resources created by initTestCase.
func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, pod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
deletePodWithWait(f, c, pod)
deletePersistentVolumeClaim(c, pvc.Name, ns)
deletePersistentVolume(c, pv.Name)
framework.DeletePodWithWait(f, c, pod)
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
framework.DeletePersistentVolume(c, pv.Name)
}

// kubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod.
30 changes: 15 additions & 15 deletions test/e2e/persistent_volumes-vsphere.go
@@ -38,7 +38,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
clientPod *v1.Pod
pvConfig persistentVolumeConfig
pvConfig framework.PersistentVolumeConfig
vsp *vsphere.VSphere
err error
node types.NodeName
@@ -69,23 +69,23 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
if volumePath == "" {
volumePath, err = createVSphereVolume(vsp, nil)
Expect(err).NotTo(HaveOccurred())
pvConfig = persistentVolumeConfig{
namePrefix: "vspherepv-",
pvSource: v1.PersistentVolumeSource{
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "vspherepv-",
PVSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
prebind: nil,
Prebind: nil,
}
}
By("Creating the PV and PVC")
pv, pvc = createPVPVC(c, pvConfig, ns, false)
waitOnPVandPVC(c, ns, pv, pvc)
pv, pvc = framework.CreatePVPVC(c, pvConfig, ns, false)
framework.WaitOnPVandPVC(c, ns, pv, pvc)

By("Creating the Client Pod")
clientPod = createClientPod(c, ns, pvc)
clientPod = framework.CreateClientPod(c, ns, pvc)
node := types.NodeName(clientPod.Spec.NodeName)

By("Verify disk should be attached to the node")
@@ -100,15 +100,15 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
if clientPod != nil {
clientPod, err = c.CoreV1().Pods(ns).Get(clientPod.Name, metav1.GetOptions{})
if !apierrs.IsNotFound(err) {
deletePodWithWait(f, c, clientPod)
framework.DeletePodWithWait(f, c, clientPod)
}
}

if pv != nil {
deletePersistentVolume(c, pv.Name)
framework.DeletePersistentVolume(c, pv.Name)
}
if pvc != nil {
deletePersistentVolumeClaim(c, pvc.Name, ns)
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
}
}
})
@@ -136,15 +136,15 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {

It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() {
By("Deleting the Claim")
deletePersistentVolumeClaim(c, pvc.Name, ns)
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)

pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
if !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
pvc = nil
By("Deleting the Pod")
deletePodWithWait(f, c, clientPod)
framework.DeletePodWithWait(f, c, clientPod)

})

@@ -157,13 +157,13 @@
*/
It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() {
By("Deleting the Persistent Volume")
deletePersistentVolume(c, pv.Name)
framework.DeletePersistentVolume(c, pv.Name)
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
if !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
pv = nil
By("Deleting the pod")
deletePodWithWait(f, c, clientPod)
framework.DeletePodWithWait(f, c, clientPod)
})
})