e2e: use log functions of core framework on pv, testfiles and volume sub packages #85666

Merged: 1 commit, Nov 28, 2019
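The change drops the e2elog wrapper package (test/e2e/framework/log) from the pv, testfiles and volume sub packages and calls the equivalent helpers exported by the core framework package directly, e.g. framework.Logf and framework.Fail. A minimal sketch of the new call pattern, under the assumption that only the import and package qualifier change (the file and function here are hypothetical; the identifiers are the ones in the diff below):

    package sketch

    // Logging now goes through the core e2e framework package.
    import "k8s.io/kubernetes/test/e2e/framework"

    // logPVCleanup is a hypothetical helper mirroring the calls in pv.go below.
    func logPVCleanup(pvName string) {
        // framework.Logf takes a printf-style format string, exactly as e2elog.Logf did.
        framework.Logf("Deleting PersistentVolume %q", pvName)
    }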
1 change: 0 additions & 1 deletion test/e2e/BUILD
@@ -68,7 +68,6 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/kubectl:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
3 changes: 1 addition & 2 deletions test/e2e/e2e.go
@@ -39,7 +39,6 @@ import (
commontest "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/manifest"
@@ -86,7 +85,7 @@ func RunE2ETests(t *testing.T) {
logs.InitLogs()
defer logs.FlushLogs()

- gomega.RegisterFailHandler(e2elog.Fail)
+ gomega.RegisterFailHandler(framework.Fail)
// Disable skipped tests unless they are explicitly requested.
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
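Beyond the import swap, the functional wiring in this file is the Gomega fail handler: assertion failures are now routed through framework.Fail rather than e2elog.Fail. A rough sketch of that registration in isolation (the function is made up; gomega.RegisterFailHandler and framework.Fail are taken from the diff, and framework.Fail matches Gomega's expected handler signature func(message string, callerSkip ...int)):

    package sketch

    import (
        "github.com/onsi/gomega"

        "k8s.io/kubernetes/test/e2e/framework"
    )

    // registerHandler is a hypothetical stand-in for the RunE2ETests wiring above.
    func registerHandler() {
        // Every failed Gomega assertion is now reported via the core framework,
        // which logs the message and fails the running spec.
        gomega.RegisterFailHandler(framework.Fail)
    }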
1 change: 0 additions & 1 deletion test/e2e/framework/pv/BUILD
@@ -16,7 +16,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
51 changes: 25 additions & 26 deletions test/e2e/framework/pv/pv.go
@@ -31,7 +31,6 @@ import (
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

@@ -143,15 +142,15 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err))
}
} else {
e2elog.Logf("pvc is nil")
framework.Logf("pvc is nil")
}
if pv != nil {
err := DeletePersistentVolume(c, pv.Name)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err))
}
} else {
e2elog.Logf("pv is nil")
framework.Logf("pv is nil")
}
return errs
}
@@ -185,7 +184,7 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa
// DeletePersistentVolume deletes the PV.
func DeletePersistentVolume(c clientset.Interface, pvName string) error {
if c != nil && len(pvName) > 0 {
e2elog.Logf("Deleting PersistentVolume %q", pvName)
framework.Logf("Deleting PersistentVolume %q", pvName)
err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
if err != nil && !apierrs.IsNotFound(err) {
return fmt.Errorf("PV Delete API error: %v", err)
@@ -197,7 +196,7 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) error {
// DeletePersistentVolumeClaim deletes the Claim.
func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error {
if c != nil && len(pvcName) > 0 {
e2elog.Logf("Deleting PersistentVolumeClaim %q", pvcName)
framework.Logf("Deleting PersistentVolumeClaim %q", pvcName)
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil)
if err != nil && !apierrs.IsNotFound(err) {
return fmt.Errorf("PVC Delete API error: %v", err)
@@ -211,14 +210,14 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
// phase value to expect for the pv bound to the to-be-deleted claim.
func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
pvname := pvc.Spec.VolumeName
e2elog.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
framework.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
if err != nil {
return err
}

// Wait for the PV's phase to return to be `expectPVPhase`
e2elog.Logf("Waiting for reclaim process to complete.")
framework.Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, PVReclaimingTimeout)
if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
@@ -243,7 +242,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
}
}

e2elog.Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
framework.Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
return nil
}

@@ -360,7 +359,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
if preBind {
preBindMsg = " pre-bound"
}
e2elog.Logf("Creating a PV followed by a%s PVC", preBindMsg)
framework.Logf("Creating a PV followed by a%s PVC", preBindMsg)

// make the pv and pvc definitions
pv := MakePersistentVolume(pvConfig)
@@ -433,7 +432,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
// WaitOnPVandPVC waits for the pv and pvc to bind to each other.
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
// Wait for newly created PVC to bind to the PV
e2elog.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, ClaimBindingTimeout)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
@@ -489,8 +488,8 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
for pvName := range pvols {
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, PVBindingTimeout)
if err != nil && len(pvols) > len(claims) {
e2elog.Logf("WARN: pv %v is not bound after max wait", pvName)
e2elog.Logf(" This may be ok since there are more pvs than pvcs")
framework.Logf("WARN: pv %v is not bound after max wait", pvName)
framework.Logf(" This may be ok since there are more pvs than pvcs")
continue
}
if err != nil {
@@ -604,7 +603,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
}

if cfg.VolumeMode != nil && *cfg.VolumeMode == "" {
e2elog.Logf("Warning: Making PVC: VolumeMode specified as invalid empty string, treating as nil")
framework.Logf("Warning: Making PVC: VolumeMode specified as invalid empty string, treating as nil")
cfg.VolumeMode = nil
}

@@ -634,10 +633,10 @@ func createPDWithRetry(zone string) (string, error) {
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
newDiskName, err = createPD(zone)
if err != nil {
e2elog.Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
framework.Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
continue
}
e2elog.Logf("Successfully created a new PD: %q.", newDiskName)
framework.Logf("Successfully created a new PD: %q.", newDiskName)
return newDiskName, nil
}
return "", err
@@ -659,10 +658,10 @@ func DeletePDWithRetry(diskName string) error {
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
err = deletePD(diskName)
if err != nil {
e2elog.Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err)
framework.Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err)
continue
}
e2elog.Logf("Successfully deleted PD %q.", diskName)
framework.Logf("Successfully deleted PD %q.", diskName)
return nil
}
return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
@@ -710,18 +709,18 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist

// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
e2elog.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
framework.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
continue
}
if pv.Status.Phase == phase {
e2elog.Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
framework.Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
return nil
}
e2elog.Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
framework.Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
}
return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
@@ -737,22 +736,22 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c
if len(pvcNames) == 0 {
return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0")
}
e2elog.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
framework.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
phaseFoundInAllClaims := true
for _, pvcName := range pvcNames {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
framework.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
continue
}
if pvc.Status.Phase == phase {
e2elog.Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
framework.Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
if matchAny {
return nil
}
} else {
e2elog.Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
framework.Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
phaseFoundInAllClaims = false
}
}
@@ -808,7 +807,7 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
if len(scName) == 0 {
return "", fmt.Errorf("No default storage class found")
}
e2elog.Logf("Default storage class: %q", scName)
framework.Logf("Default storage class: %q", scName)
return scName, nil
}

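For callers, the exported behavior of these pv helpers is unchanged; only the logging backend moved. As a hypothetical usage sketch of the wait helper shown above (the caller function, client and 5-minute timeout are assumptions; v1.VolumeBound, framework.Poll and framework.Failf are existing identifiers from this diff and the framework package):

    package sketch

    import (
        "time"

        v1 "k8s.io/api/core/v1"
        clientset "k8s.io/client-go/kubernetes"

        "k8s.io/kubernetes/test/e2e/framework"
        e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
    )

    // waitForBound polls until the PV reports phase Bound or the timeout expires.
    func waitForBound(c clientset.Interface, pvName string) {
        err := e2epv.WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, 5*time.Minute)
        if err != nil {
            framework.Failf("PV %s did not become Bound: %v", pvName, err)
        }
    }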
2 changes: 1 addition & 1 deletion test/e2e/framework/testfiles/BUILD
@@ -5,7 +5,7 @@ go_library(
srcs = ["testfiles.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/testfiles",
visibility = ["//visibility:public"],
deps = ["//test/e2e/framework/log:go_default_library"],
deps = ["//test/e2e/framework:go_default_library"],
)

filegroup(
6 changes: 3 additions & 3 deletions test/e2e/framework/testfiles/testfiles.go
@@ -34,7 +34,7 @@ import (
"sort"
"strings"

e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
)

var filesources []FileSource
@@ -73,7 +73,7 @@ type FileSource interface {
func ReadOrDie(filePath string) []byte {
data, err := Read(filePath)
if err != nil {
- e2elog.Fail(err.Error(), 1)
+ framework.Fail(err.Error(), 1)
}
return data
}
@@ -110,7 +110,7 @@ func Exists(filePath string) bool {
for _, filesource := range filesources {
data, err := filesource.ReadTestFile(filePath)
if err != nil {
- e2elog.Fail(fmt.Sprintf("fatal error looking for test file %s: %s", filePath, err), 1)
+ framework.Fail(fmt.Sprintf("fatal error looking for test file %s: %s", filePath, err), 1)
}
if data != nil {
return true
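Since ReadOrDie and Exists now abort through framework.Fail, a missing or unreadable test file surfaces as a regular framework failure. A hypothetical usage sketch (the manifest path and function are made up; ReadOrDie's signature comes from the diff above):

    package sketch

    import (
        "k8s.io/kubernetes/test/e2e/framework"
        "k8s.io/kubernetes/test/e2e/framework/testfiles"
    )

    // loadManifest reads a file registered with the testfiles sources.
    func loadManifest() []byte {
        // ReadOrDie fails the test via framework.Fail if the file cannot be read.
        data := testfiles.ReadOrDie("test/e2e/testing-manifests/sketch.yaml")
        framework.Logf("read %d bytes of manifest", len(data))
        return data
    }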
1 change: 0 additions & 1 deletion test/e2e/framework/volume/BUILD
@@ -13,7 +13,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/storage/utils:go_default_library",
3 changes: 1 addition & 2 deletions test/e2e/framework/volume/fixtures.go
@@ -51,7 +51,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -419,7 +418,7 @@ func CleanUpVolumeServerWithSecret(f *framework.Framework, serverPod *v1.Pod, se
}
}

e2elog.Logf("Deleting server pod %q...", serverPod.Name)
framework.Logf("Deleting server pod %q...", serverPod.Name)
err := e2epod.DeletePodWithWait(cs, serverPod)
if err != nil {
framework.Logf("Server pod delete failed: %v", err)