Skip to content

Commit

Permalink
Merge pull request #28760 from kubernetes/revert-28015-workaround_kub…
Browse files Browse the repository at this point in the history
…eproxy

Automatic merge from submit-queue

Revert "Workaround KubeProxy failures in test framework"

Reverts #28015

For #25543.
Revert workaround in test framework to verify whether #28697 solved the problem.

@wojtek-t
  • Loading branch information
k8s-merge-robot committed Jul 12, 2016
2 parents 6a4de09 + f68acf6 commit 9467c21
Show file tree
Hide file tree
Showing 5 changed files with 5 additions and 39 deletions.
2 changes: 1 addition & 1 deletion test/e2e/e2e.go
Expand Up @@ -125,7 +125,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// test pods from running, and tests that ensure all pods are running and
// ready will fail).
podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
if err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels, true); err != nil {
if err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels); err != nil {
framework.DumpAllNamespaceInfo(c, api.NamespaceSystem)
framework.LogFailedContainers(c, api.NamespaceSystem)
framework.RunKubernetesServiceTestContainer(c, framework.TestContext.RepoRoot, api.NamespaceDefault)
Expand Down
36 changes: 1 addition & 35 deletions test/e2e/framework/util.go
Expand Up @@ -562,7 +562,7 @@ func WaitForPodsSuccess(c *client.Client, ns string, successPodLabels map[string
// even if there are minPods pods, some of which are in Running/Ready
// and some in Success. This is to allow the client to decide if "Success"
// means "Ready" or not.
func WaitForPodsRunningReady(c *client.Client, ns string, minPods int32, timeout time.Duration, ignoreLabels map[string]string, restartDockerOnFailures bool) error {
func WaitForPodsRunningReady(c *client.Client, ns string, minPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
start := time.Now()
Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
Expand All @@ -575,10 +575,6 @@ func WaitForPodsRunningReady(c *client.Client, ns string, minPods int32, timeout
wg.Done()
}()

// We will be restarting all not-ready kubeProxies every 5 minutes,
// to workaround #25543 issue.
badKubeProxySince := make(map[string]time.Time)

if wait.PollImmediate(Poll, timeout, func() (bool, error) {
// We get the new list of pods and replication controllers in every
// iteration because more pods come online during startup and we want to
Expand Down Expand Up @@ -609,8 +605,6 @@ func WaitForPodsRunningReady(c *client.Client, ns string, minPods int32, timeout
if hasReplicationControllersForPod(rcList, pod) {
replicaOk++
}
// If the pod is healthy, remove it from bad ones.
delete(badKubeProxySince, pod.Name)
} else {
if pod.Status.Phase != api.PodFailed {
Logf("The status of Pod %s is %s, waiting for it to be either Running or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
Expand All @@ -623,34 +617,6 @@ func WaitForPodsRunningReady(c *client.Client, ns string, minPods int32, timeout
}
}

// Try to repair all KubeProxies that are not-ready long enough by restarting Docker:
// see https://github.com/kubernetes/kubernetes/issues/24295#issuecomment-218920357
// for exact details.
if restartDockerOnFailures {
for _, badPod := range badPods {
name := badPod.Name
if len(name) > 10 && name[:10] == "kube-proxy" {
if _, ok := badKubeProxySince[name]; !ok {
badKubeProxySince[name] = time.Now()
}
if time.Since(badKubeProxySince[name]) > 5*time.Minute {
node, err := c.Nodes().Get(badPod.Spec.NodeName)
if err != nil {
Logf("Couldn't get node: %v", err)
continue
}
err = IssueSSHCommand("sudo service docker restart", TestContext.Provider, node)
if err != nil {
Logf("Couldn't restart docker on %s: %v", name, err)
continue
}
Logf("Docker on %s node restarted", badPod.Spec.NodeName)
delete(badKubeProxySince, name)
}
}
}
}

Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
Expand Down
2 changes: 1 addition & 1 deletion test/e2e/mesos.go
Expand Up @@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("Mesos", func() {

const ns = "static-pods"
numpods := int32(len(nodelist.Items))
framework.ExpectNoError(framework.WaitForPodsRunningReady(client, ns, numpods, wait.ForeverTestTimeout, map[string]string{}, false),
framework.ExpectNoError(framework.WaitForPodsRunningReady(client, ns, numpods, wait.ForeverTestTimeout, map[string]string{}),
fmt.Sprintf("number of static pods in namespace %s is %d", ns, numpods))
})

Expand Down
2 changes: 1 addition & 1 deletion test/e2e/resize_nodes.go
Expand Up @@ -424,7 +424,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
By("waiting for system pods to successfully restart")
err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, ignoreLabels, false)
err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
By("waiting for image prepulling pods to complete")
framework.WaitForPodsSuccess(c, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout)
Expand Down
2 changes: 1 addition & 1 deletion test/e2e/scheduler_predicates.go
Expand Up @@ -222,7 +222,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
}
}

err = framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels, false)
err = framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
Expect(err).NotTo(HaveOccurred())

for _, node := range nodeList.Items {
Expand Down

0 comments on commit 9467c21

Please sign in to comment.