cleanup test code in lifecycle, servicecatalog and ui package #81980

Merged
2 commits merged on Sep 11, 2019
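The changes below all follow one pattern: the e2elog alias import of k8s.io/kubernetes/test/e2e/framework/log is dropped, its Logf/Failf call sites switch to the equivalent helpers on the framework package, and the matching //test/e2e/framework/log:go_default_library entry is removed from each BUILD file. A minimal sketch of the call-site change, assuming a hypothetical helper (the function below is illustrative, not code from this PR):

```go
package lifecycle

import (
	"k8s.io/kubernetes/test/e2e/framework"
	// Before this cleanup the files also imported:
	//   e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logCleanupExample is a hypothetical helper showing the old and new call sites.
func logCleanupExample(name string, err error) {
	// Old: e2elog.Logf("Cleaning up %s.", name)
	framework.Logf("Cleaning up %s.", name)

	if err != nil {
		// Old: e2elog.Failf("Failed to clean up %s: %v", name, err)
		framework.Failf("Failed to clean up %s: %v", name, err)
	}
}
```

framework.Logf and framework.Failf are the helpers already used elsewhere in these files, so the alias import and the extra BUILD dependency carry no additional behavior.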
1 change: 0 additions & 1 deletion test/e2e/lifecycle/BUILD
@@ -37,7 +37,6 @@ go_library(
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/kubelet:go_default_library",
"//test/e2e/framework/lifecycle:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
9 changes: 4 additions & 5 deletions test/e2e/lifecycle/addon_update.go
@@ -29,12 +29,11 @@ import (
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)

// TODO: it would probably be slightly better to build up the objects
@@ -299,7 +298,7 @@ var _ = SIGDescribe("Addon update", func() {
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExists, destinationDir, svcAddonEnsureExists))
// Delete the "ensure exist class" addon at the end.
defer func() {
e2elog.Logf("Cleaning up ensure exist class addon.")
framework.Logf("Cleaning up ensure exist class addon.")
err := f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)
framework.ExpectNoError(err)
}()
@@ -392,7 +391,7 @@ func sshExecAndVerify(client *ssh.Client, cmd string) {
}

func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
e2elog.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
@@ -424,7 +423,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
}

func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error {
e2elog.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
session, err := sshClient.NewSession()
if err != nil {
return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)
1 change: 0 additions & 1 deletion test/e2e/lifecycle/bootstrap/BUILD
@@ -21,7 +21,6 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/lifecycle:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
7 changes: 3 additions & 4 deletions test/e2e/lifecycle/bootstrap/util.go
@@ -29,7 +29,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

func newTokenSecret(tokenID, tokenSecret string) *v1.Secret {
@@ -84,7 +83,7 @@ func WaitforSignedClusterInfoByBootStrapToken(c clientset.Interface, tokenID str
return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
if err != nil {
e2elog.Failf("Failed to get cluster-info configMap: %v", err)
framework.Failf("Failed to get cluster-info configMap: %v", err)
return false, err
}
_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
@@ -100,7 +99,7 @@ func WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c clientset.Interface, t
return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
if err != nil {
e2elog.Failf("Failed to get cluster-info configMap: %v", err)
framework.Failf("Failed to get cluster-info configMap: %v", err)
return false, err
}
updated, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
@@ -116,7 +115,7 @@ func WaitForSignedClusterInfoByBootstrapTokenToDisappear(c clientset.Interface,
return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
if err != nil {
e2elog.Failf("Failed to get cluster-info configMap: %v", err)
framework.Failf("Failed to get cluster-info configMap: %v", err)
return false, err
}
_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
11 changes: 5 additions & 6 deletions test/e2e/lifecycle/ha_master.go
@@ -28,12 +28,11 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func addMasterReplica(zone string) error {
e2elog.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
framework.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "true", "false")
if err != nil {
return err
@@ -42,7 +41,7 @@ func addMasterReplica(zone string) error {
}

func removeMasterReplica(zone string) error {
e2elog.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
framework.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "false", "false")
if err != nil {
return err
@@ -51,7 +50,7 @@ func removeMasterReplica(zone string) error {
}

func addWorkerNodes(zone string) error {
e2elog.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
framework.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "false", "true")
if err != nil {
return err
@@ -60,7 +59,7 @@ func addWorkerNodes(zone string) error {
}

func removeWorkerNodes(zone string) error {
e2elog.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
framework.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "true", "true")
if err != nil {
return err
@@ -83,7 +82,7 @@ func findRegionForZone(zone string) string {
region, err := exec.Command("gcloud", "compute", "zones", "list", zone, "--quiet", "--format=csv[no-heading](region)").Output()
framework.ExpectNoError(err)
if string(region) == "" {
e2elog.Failf("Region not found; zone: %s", zone)
framework.Failf("Region not found; zone: %s", zone)
}
return string(region)
}
9 changes: 4 additions & 5 deletions test/e2e/lifecycle/kubelet_security.go
@@ -22,15 +22,14 @@ import (
"net/http"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
@@ -82,7 +81,7 @@ func portClosedTest(f *framework.Framework, pickNode *v1.Node, port int) {
conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 1*time.Minute)
if err == nil {
conn.Close()
e2elog.Failf("port %d is not disabled", port)
framework.Failf("port %d is not disabled", port)
}
}
}
11 changes: 5 additions & 6 deletions test/e2e/lifecycle/node_lease.go
@@ -25,7 +25,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

@@ -47,7 +46,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
gomega.Expect(err).To(gomega.BeNil())
systemPodsNo = int32(len(systemPods))
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
} else {
group = framework.TestContext.CloudConfig.NodeInstanceGroup
}
@@ -70,7 +69,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {

ginkgo.By("restoring the original node instance group size")
if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
@@ -85,11 +84,11 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
time.Sleep(5 * time.Minute)
}
if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}

if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
e2elog.Failf("Couldn't restore the original cluster size: %v", err)
framework.Failf("Couldn't restore the original cluster size: %v", err)
}
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
@@ -111,7 +110,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
pass := true
for _, node := range originalNodes.Items {
if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
e2elog.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
pass = false
}
}
29 changes: 14 additions & 15 deletions test/e2e/lifecycle/reboot.go
@@ -30,7 +30,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -74,7 +73,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
framework.ExpectNoError(err)

for _, e := range events.Items {
e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
}
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
@@ -138,7 +137,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
nodelist := framework.GetReadySchedulableNodesOrDie(c)
if hook != nil {
defer func() {
e2elog.Logf("Executing termination hook on nodes")
framework.Logf("Executing termination hook on nodes")
hook(framework.TestContext.Provider, nodelist)
}()
}
@@ -165,10 +164,10 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
for ix := range nodelist.Items {
n := nodelist.Items[ix]
if !result[ix] {
e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
}
}
e2elog.Failf("Test failed; at least one node failed to reboot in the time given.")
framework.Failf("Test failed; at least one node failed to reboot in the time given.")
}
}

@@ -179,9 +178,9 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
prefix = "Retrieving log for the last terminated container"
}
if err != nil {
e2elog.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
} else {
e2elog.Logf("%s %s:\n%s\n", prefix, id, log)
framework.Logf("%s %s:\n%s\n", prefix, id, log)
}
}
podNameSet := sets.NewString(podNames...)
@@ -195,7 +194,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
if ok, _ := testutils.PodRunningReady(p); ok {
continue
}
e2elog.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
// Print the log of the containers if pod is not running and ready.
for _, container := range p.Status.ContainerStatuses {
cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
@@ -224,16 +223,16 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
ns := metav1.NamespaceSystem
ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
if err != nil {
e2elog.Logf("Couldn't initialize pod store: %v", err)
framework.Logf("Couldn't initialize pod store: %v", err)
return false
}
defer ps.Stop()

// Get the node initially.
e2elog.Logf("Getting %s", name)
framework.Logf("Getting %s", name)
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Couldn't get node %s", name)
framework.Logf("Couldn't get node %s", name)
return false
}

@@ -258,7 +257,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
podNames = append(podNames, p.ObjectMeta.Name)
}
}
e2elog.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)

// For each pod, we do a sanity check to ensure it's running / healthy
// or succeeded now, as that's what we'll be checking later.
@@ -269,7 +268,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {

// Reboot the node.
if err = e2essh.IssueSSHCommand(rebootCmd, provider, node); err != nil {
e2elog.Logf("Error while issuing ssh command: %v", err)
framework.Logf("Error while issuing ssh command: %v", err)
return false
}

@@ -291,7 +290,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
return false
}

e2elog.Logf("Reboot successful on node %s", name)
framework.Logf("Reboot successful on node %s", name)
return true
}

@@ -302,7 +301,7 @@ func catLogHook(logPath string) terminationHook {
for _, n := range nodes.Items {
cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
if _, err := e2essh.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
e2elog.Logf("Error while issuing ssh command: %v", err)
framework.Logf("Error while issuing ssh command: %v", err)
}
}

9 changes: 4 additions & 5 deletions test/e2e/lifecycle/resize_nodes.go
@@ -25,7 +25,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

@@ -56,7 +55,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
framework.ExpectNoError(err)
systemPodsNo = int32(len(systemPods))
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
} else {
group = framework.TestContext.CloudConfig.NodeInstanceGroup
}
@@ -81,7 +80,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {

ginkgo.By("restoring the original node instance group size")
if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
@@ -96,11 +95,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
time.Sleep(5 * time.Minute)
}
if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}

if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
e2elog.Failf("Couldn't restore the original cluster size: %v", err)
framework.Failf("Couldn't restore the original cluster size: %v", err)
}
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.