e2e reboot: print status and logs for not running/ready pods #20108

Merged 1 commit on Jan 26, 2016
14 changes: 0 additions & 14 deletions test/e2e/downwardapi_volume.go
@@ -18,11 +18,9 @@ package e2e

import (
"fmt"
"strings"
"time"

"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util"

. "github.com/onsi/ginkgo"
@@ -32,18 +30,6 @@ import (
// How long to wait for a log pod to be displayed
const podLogTimeout = 45 * time.Second

// utility function for gomega Eventually
func getPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
logs, err := c.Get().Resource("pods").Namespace(namespace).Name(podName).SubResource("log").Param("container", containerName).Do().Raw()
if err != nil {
return "", err
}
if err == nil && strings.Contains(string(logs), "Internal Error") {
return "", fmt.Errorf("Internal Error")
}
return string(logs), err
}

var _ = Describe("Downward API volume", func() {
f := NewFramework("downward-api")
It("should provide podname only [Conformance]", func() {
37 changes: 37 additions & 0 deletions test/e2e/reboot.go
@@ -157,6 +157,41 @@ func testReboot(c *client.Client, rebootCmd string) {
}
}

func printStatusAndLogsForNotReadyPods(c *client.Client, oldPods, newPods []*api.Pod) {
printFn := func(id, log string, err error, previous bool) {
prefix := "Retrieving log for container"
if previous {
prefix = "Retrieving log for the last terminated container"
}
if err != nil {
Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
} else {
Logf("%s %s:\n%s\n", prefix, id, log)
}
}
for _, oldPod := range oldPods {
for _, p := range newPods {
if p.Namespace != oldPod.Namespace || p.Name != oldPod.Name {
continue
}
if ok, _ := podRunningReady(p); !ok {
Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
// Print the logs of the pod's containers, since the pod is not running and ready.
for _, container := range p.Status.ContainerStatuses {
cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
log, err := getPodLogs(c, p.Namespace, p.Name, container.Name)
printFn(cIdentifer, log, err, false)
// Also retrieve the log of the last terminated container if the container has restarted.
if container.RestartCount > 0 {
prevLog, prevErr := getPreviousPodLogs(c, p.Namespace, p.Name, container.Name)
printFn(cIdentifer, prevLog, prevErr, true)
}
}
}
break
}
}
}

// rebootNode takes node name on provider through the following steps using c:
// - ensures the node is ready
// - ensures all pods on the node are running and ready
@@ -229,6 +264,8 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
// Ensure all of the pods that we found on this node before the reboot are
// running / healthy.
if !checkPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) {
newPods := ps.List()
printStatusAndLogsForNotReadyPods(c, pods, newPods)
return false
}

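The podRunningReady check used above is defined elsewhere in the e2e utilities and is not part of this diff. As a rough sketch of what "running and ready" means here, assuming the standard pod phase and condition fields of the unversioned API (the function name and error wording are illustrative only, not the real helper):

package e2e

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

// podRunningReadySketch approximates the semantics assumed above: the pod
// must be in phase Running and report a Ready condition with status True.
func podRunningReadySketch(p *api.Pod) (bool, error) {
	if p.Status.Phase != api.PodRunning {
		return false, fmt.Errorf("pod %s/%s is in phase %q, want %q",
			p.Namespace, p.Name, p.Status.Phase, api.PodRunning)
	}
	for _, cond := range p.Status.Conditions {
		if cond.Type == api.PodReady && cond.Status == api.ConditionTrue {
			return true, nil
		}
	}
	return false, fmt.Errorf("pod %s/%s does not report condition %v=%v",
		p.Namespace, p.Name, api.PodReady, api.ConditionTrue)
}

printStatusAndLogsForNotReadyPods only uses this predicate to decide which pods are worth dumping status and logs for; the real e2e helper may differ in its error messages.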
45 changes: 31 additions & 14 deletions test/e2e/util.go
@@ -1338,36 +1338,26 @@ func testContainerOutputMatcher(scenarioName string,

By(fmt.Sprintf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err))
var logs []byte
var logs string
start := time.Now()

// Sometimes the actual containers take a second to get started, try to get logs for 60s
for time.Now().Sub(start) < (60 * time.Second) {
err = nil
logs, err = c.Get().
Resource("pods").
Namespace(ns).
Name(pod.Name).
SubResource("log").
Param("container", containerName).
Do().
Raw()
if err == nil && strings.Contains(string(logs), "Internal Error") {
err = fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
}
logs, err = getPodLogs(c, ns, pod.Name, containerName)
if err != nil {
By(fmt.Sprintf("Warning: Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err))
time.Sleep(5 * time.Second)
continue

}
By(fmt.Sprintf("Successfully fetched pod logs:%v\n", string(logs)))
By(fmt.Sprintf("Successfully fetched pod logs:%v\n", logs))
break
}

for _, m := range expectedOutput {
Expect(string(logs)).To(matcher(m), "%q in container output", m)
Expect(logs).To(matcher(m), "%q in container output", m)
}
}

@@ -2715,3 +2705,30 @@ func scaleRCByName(client *client.Client, ns, name string, replicas uint) error
client, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector)))
}
}

func getPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false)
}

func getPreviousPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true)
}

// utility function for gomega Eventually
func getPodLogsInternal(c *client.Client, namespace, podName, containerName string, previous bool) (string, error) {
logs, err := c.Get().
Resource("pods").
Namespace(namespace).
Name(podName).SubResource("log").
Param("container", containerName).
Param("previous", strconv.FormatBool(previous)).
Do().
Raw()
if err != nil {
return "", err
}
if strings.Contains(string(logs), "Internal Error") {
return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
}
return string(logs), nil
}
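The comment on getPodLogsInternal describes it as a utility for gomega's Eventually. A minimal sketch of that polling pattern, assuming a Ginkgo spec inside the e2e package (with its dot-imported ginkgo/gomega), a framework f created by NewFramework, an already-created pod, and an expectedLine to look for; those names and the 2-second poll interval are illustrative, not part of this diff:

It("should eventually expose the expected log line", func() {
	// Poll the container log until it contains the expected text or
	// podLogTimeout elapses. A non-nil error from getPodLogs makes that
	// poll attempt fail, so Eventually simply retries until the timeout.
	Eventually(func() (string, error) {
		return getPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
	}, podLogTimeout, 2*time.Second).Should(ContainSubstring(expectedLine))
})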