Merge pull request #7839 from jayunit100/density-verify-param
E2E: Parameterize Density failure
wojtek-t committed May 7, 2015
2 parents 600b00f + 7d4249d commit 5200aa1
Showing 1 changed file with 23 additions and 8 deletions.
31 changes: 23 additions & 8 deletions test/e2e/util.go
@@ -20,6 +20,7 @@ import (
"bytes"
"fmt"
"io/ioutil"
"math"
"math/rand"
"os"
"os/exec"
@@ -444,11 +445,14 @@ func DeleteRC(c *client.Client, ns, name string) error {
return nil
}

// Launch a Replication Controller and wait for all pods it spawns
// to become running. The controller will need to be cleaned up external
// to this method
// RunRC launches (and verifies the correctness of) a Replication Controller.
// It waits for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e., use the
// namespace lifecycle for handling cleanup).
func RunRC(c *client.Client, name string, ns, image string, replicas int) error {
var last int

maxContainerFailures := int(math.Max(1.0, float64(replicas)*.01))
current := 0
same := 0

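The maxContainerFailures line added above budgets container failures at roughly 1% of the replica count, but never less than one. A minimal standalone sketch of how that formula scales (the replica counts below are illustrative, not taken from the test suite):

package main

import (
	"fmt"
	"math"
)

// maxContainerFailures mirrors the formula used in RunRC above:
// allow roughly 1% of replicas to fail, but never fewer than one.
func maxContainerFailures(replicas int) int {
	return int(math.Max(1.0, float64(replicas)*.01))
}

func main() {
	// Illustrative replica counts: for 30 pods the budget stays at 1,
	// while for 3000 pods it grows to 30.
	for _, replicas := range []int{30, 100, 500, 3000} {
		fmt.Printf("replicas=%d -> maxContainerFailures=%d\n",
			replicas, maxContainerFailures(replicas))
	}
}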
@@ -517,7 +521,7 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int) error
}
Logf("Controller %s: Found %d pods out of %d", name, current, replicas)

By("Waiting for each pod to be running")
By(fmt.Sprintf("Waiting for all %d replicas to be running with a max container failures of %d", replicas, maxContainerFailures))
same = 0
last = 0
failCount = 10
@@ -539,7 +543,7 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int) error
for _, p := range currentPods.Items {
if p.Status.Phase == api.PodRunning {
current++
if err := VerifyContainersAreNotFailed(p); err != nil {
if err := VerifyContainersAreNotFailed(p, maxContainerFailures); err != nil {
return err
}
} else if p.Status.Phase == api.PodPending {
@@ -584,16 +588,27 @@ func listPods(c *client.Client, namespace string, label labels.Selector, field f
return pods, err
}

func VerifyContainersAreNotFailed(pod api.Pod) error {
// VerifyContainersAreNotFailed confirms that no container in the pod has entered an invalid state.
// For example: too many restarts, a non-nil Termination, and so on.
func VerifyContainersAreNotFailed(pod api.Pod, restartMax int) error {
var errStrings []string

statuses := pod.Status.ContainerStatuses
if len(statuses) == 0 {
return nil
} else {
for _, status := range statuses {
if status.State.Termination != nil || status.LastTerminationState.Termination != nil || status.RestartCount != 0 {
errStrings = append(errStrings, fmt.Sprintf("Error: Pod %s (host: %s) : Container %s was found to have terminated %d times", pod.Name, pod.Spec.Host, status.Name, status.RestartCount))
errormsg := ""
if status.State.Termination != nil {
errormsg = "status.State.Termination was not nil"
} else if status.LastTerminationState.Termination != nil {
errormsg = "status.LastTerminationState.Termination was not nil"
} else if status.RestartCount > restartMax {
errormsg = fmt.Sprintf("restarted %d times, exceeding the allowed maximum of %d", status.RestartCount, restartMax)
}

if len(errormsg) != 0 {
errStrings = append(errStrings, fmt.Sprintf("Error: Pod %s (host: %s): Container %s was in a bad state (%s).", pod.Name, pod.Spec.Host, status.Name, errormsg))
}
}
}
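To make the new threshold behaviour concrete, here is a minimal, self-contained sketch of the same check using a simplified stand-in struct rather than the real api.ContainerStatus (the type and field names below are hypothetical, chosen only for illustration):

package main

import "fmt"

// containerStatus is a simplified stand-in for the fields the check above
// inspects on a container status.
type containerStatus struct {
	Name         string
	Terminated   bool // stands in for State.Termination != nil
	RestartCount int
}

// verifyNotFailed mirrors the gist of VerifyContainersAreNotFailed: a container
// is considered failed if it terminated or restarted more than restartMax times.
func verifyNotFailed(statuses []containerStatus, restartMax int) error {
	for _, s := range statuses {
		if s.Terminated {
			return fmt.Errorf("container %s terminated", s.Name)
		}
		if s.RestartCount > restartMax {
			return fmt.Errorf("container %s restarted %d times (limit %d)",
				s.Name, s.RestartCount, restartMax)
		}
	}
	return nil
}

func main() {
	statuses := []containerStatus{
		{Name: "density-pod", RestartCount: 2},
	}
	// With a budget of 1 allowed restart, 2 restarts is reported as a failure.
	fmt.Println(verifyNotFailed(statuses, 1))
}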
