e2e: move LogFailedContainers out of e2e test framework util.go
SataQiu authored and 邱世达 committed Nov 15, 2019
1 parent 97d45fe commit 50bc528
Showing 43 changed files with 248 additions and 115 deletions.
1 change: 1 addition & 0 deletions test/e2e/BUILD
@@ -66,6 +66,7 @@ go_library(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/kubectl:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
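The only dependency added to test/e2e/BUILD is the new framework/kubectl library, which is where the commit title says LogFailedContainers now lives. A minimal sketch of a call site after the move, assuming the helper keeps the signature it had in util.go and is imported under a hypothetical e2ekubectl alias:

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" // assumed new home of LogFailedContainers
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// dumpFailedContainers logs the containers of any failed pods in ns, as suite
// teardown previously did via framework.LogFailedContainers.
func dumpFailedContainers(c clientset.Interface, ns string) {
	e2ekubectl.LogFailedContainers(c, ns, e2elog.Logf)
}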
1 change: 1 addition & 0 deletions test/e2e/apimachinery/BUILD
@@ -87,6 +87,7 @@ go_library(
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/rc:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/utils:go_default_library",
"//test/utils/crd:go_default_library",
3 changes: 2 additions & 1 deletion test/e2e/apimachinery/etcd_failure.go
@@ -25,6 +25,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/apps"
"k8s.io/kubernetes/test/e2e/framework"
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -45,7 +46,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
framework.SkipUnlessProviderIs("gce")
framework.SkipUnlessSSHKeyPresent()

- err := framework.RunRC(testutils.RCConfig{
+ err := e2erc.RunRC(testutils.RCConfig{
Client: f.ClientSet,
Name: "baz",
Namespace: f.Namespace.Name,
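For context, the truncated hunk above is the standard RC bring-up in the etcd failure test; only the package qualifier changes. A compilable sketch of the call site after this commit, with the field values copied from the hunk and the Image and Replicas lines assumed since the diff cuts off:

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc" // replaces the framework.RunRC call path
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// startBazRC mirrors the BeforeEach in etcd_failure.go: it starts a small RC
// and fails the test if the pods never come up.
func startBazRC(c clientset.Interface, ns string) {
	err := e2erc.RunRC(testutils.RCConfig{ // was framework.RunRC before this commit
		Client:    c,
		Name:      "baz",
		Namespace: ns,
		Image:     imageutils.GetPauseImageName(), // assumed; the diff truncates here
		Replicas:  1,                              // assumed; the diff truncates here
	})
	framework.ExpectNoError(err)
}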
1 change: 1 addition & 0 deletions test/e2e/apps/BUILD
@@ -68,6 +68,7 @@ go_library(
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/rc:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/service:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
9 changes: 5 additions & 4 deletions test/e2e/apps/daemon_restart.go
@@ -34,6 +34,7 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -209,7 +210,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
Replicas: numPods,
CreatedPods: &[]*v1.Pod{},
}
- framework.ExpectNoError(framework.RunRC(config))
+ framework.ExpectNoError(e2erc.RunRC(config))
replacePods(*config.CreatedPods, existingPods)

stopCh = make(chan struct{})
@@ -260,7 +261,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
// to the same size achieves this, because the scale operation advances the RC's sequence number
// and awaits it to be observed and reported back in the RC's status.
- framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true)
+ e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true)

// Only check the keys, the pods can be different if the kubelet updated it.
// TODO: Can it really?
@@ -291,9 +292,9 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
restarter.kill()
// This is best effort to try and create pods while the scheduler is down,
// since we don't know exactly when it is restarted after the kill signal.
- framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false))
+ framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false))
restarter.waitUp()
- framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
+ framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
})

ginkgo.It("Kubelet should not restart containers across restart", func() {
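The ScaleRC calls above double as a synchronization barrier, per the comment in the hunk: scaling the RC to the size it already has advances its generation and, when wait is true, blocks until the controller manager reports that generation back in status. A sketch of that idiom in isolation, assuming ScaleRC keeps the framework signature (clientset, scales getter, namespace, name, size, wait):

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	scaleclient "k8s.io/client-go/scale"
	"k8s.io/kubernetes/test/e2e/framework"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
)

// waitForRCToSettle scales the RC to its current size. The pods are untouched,
// but the round trip through the RC's status proves the controller manager has
// processed any create/delete work queued before the call.
func waitForRCToSettle(c clientset.Interface, sg scaleclient.ScalesGetter, ns, name string, size uint) {
	framework.ExpectNoError(e2erc.ScaleRC(c, sg, ns, name, size, true))
}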
1 change: 1 addition & 0 deletions test/e2e/autoscaling/BUILD
@@ -44,6 +44,7 @@ go_library(
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/rc:go_default_library",
"//test/e2e/instrumentation/monitoring:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils:go_default_library",
17 changes: 9 additions & 8 deletions test/e2e/autoscaling/cluster_autoscaler_scalability.go
@@ -32,6 +32,7 @@ import (
"k8s.io/klog"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

@@ -345,8 +346,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
totalMemReservation := unschedulableMemReservation * unschedulablePodReplicas
timeToWait := 5 * time.Minute
podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait)
- framework.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
- defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)
+ e2erc.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
+ defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)

// Ensure that no new nodes have been added so far.
readyNodeCount, _ := e2enode.TotalReady(f.ClientSet)
@@ -379,7 +380,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
// run rc based on config
ginkgo.By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
start := time.Now()
- framework.ExpectNoError(framework.RunRC(*config.extraPods))
+ framework.ExpectNoError(e2erc.RunRC(*config.extraPods))
// check results
if tolerateMissingNodeCount > 0 {
// Tolerate some number of nodes not to be created.
@@ -397,7 +398,7 @@
}
timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes))
return func() error {
- return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name)
+ return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name)
}
}

@@ -475,10 +476,10 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p
HostPorts: map[string]int{"port1": port},
MemRequest: request,
}
- err := framework.RunRC(*config)
+ err := e2erc.RunRC(*config)
framework.ExpectNoError(err)
return func() error {
- return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
+ return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
}
}

@@ -515,10 +516,10 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
// Create the target RC
rcConfig := reserveMemoryRCConfig(f, id, totalPods, totalPods*podMemRequestMegabytes, timeout)
- framework.ExpectNoError(framework.RunRC(*rcConfig))
+ framework.ExpectNoError(e2erc.RunRC(*rcConfig))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
return func() error {
- return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
+ return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
}
}

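createHostPortPodsWithMemory and distributeLoad above both follow the same shape: start an RC, then return a cleanup closure for the caller to defer or register, and after this commit that closure calls e2erc.DeleteRCAndWaitForGC instead of the framework version. A sketch of the pattern, with the helper name being illustrative:

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
	testutils "k8s.io/kubernetes/test/utils"
)

// runRCWithCleanup starts the RC described by config and hands back a cleanup
// closure; the autoscaling tests defer it (or stash it for teardown) so the RC
// is deleted and garbage-collected even when the test body fails early.
func runRCWithCleanup(f *framework.Framework, config *testutils.RCConfig) func() error {
	framework.ExpectNoError(e2erc.RunRC(*config))
	return func() error {
		return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.Name)
	}
}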
