Fix 7 broken example e2e tests #27577

Merged 2 commits on Jun 21, 2016
examples/redis/redis-controller.yaml (1 addition, 1 deletion)

@@ -13,7 +13,7 @@ spec:
     spec:
       containers:
       - name: redis
-        image: kubernetes/redis:v2
+        image: kubernetes/redis:v1
         ports:
         - containerPort: 6379
         resources:

Review comment (Contributor): Any chance we can move these images somewhere in gcr.io?

Reply (Member, author): fixing in #27805
examples/redis/redis-master.yaml (2 additions, 2 deletions)

@@ -9,7 +9,7 @@ metadata:
 spec:
   containers:
   - name: master
-    image: kubernetes/redis:v2
+    image: kubernetes/redis:v1
     env:
     - name: MASTER
       value: "true"
@@ -22,7 +22,7 @@ spec:
     - mountPath: /redis-master-data
       name: data
   - name: sentinel
-    image: kubernetes/redis:v2
+    image: kubernetes/redis:v1
     env:
     - name: SENTINEL
       value: "true"
examples/redis/redis-sentinel-controller.yaml (1 addition, 1 deletion)

@@ -15,7 +15,7 @@ spec:
     spec:
      containers:
      - name: sentinel
-       image: kubernetes/redis:v2
+       image: kubernetes/redis:v1
        env:
        - name: SENTINEL
          value: "true"
examples/spark/spark-master-controller.yaml (0 additions, 1 deletion)

@@ -2,7 +2,6 @@ kind: ReplicationController
 apiVersion: v1
 metadata:
   name: spark-master-controller
-  namespace: spark-cluster
 spec:
   replicas: 1
   selector:
examples/spark/spark-master-service.yaml (0 additions, 1 deletion)

@@ -2,7 +2,6 @@ kind: Service
 apiVersion: v1
 metadata:
   name: spark-master
-  namespace: spark-cluster
 spec:
   ports:
   - port: 7077
examples/spark/spark-worker-controller.yaml (0 additions, 1 deletion)

@@ -2,7 +2,6 @@ kind: ReplicationController
 apiVersion: v1
 metadata:
   name: spark-worker-controller
-  namespace: spark-cluster
 spec:
   replicas: 2
   selector:
test/e2e/examples.go (27 additions, 20 deletions)

@@ -40,13 +40,18 @@ const (
 
 var _ = framework.KubeDescribe("[Feature:Example]", func() {
 	f := framework.NewDefaultFramework("examples")
-	// Customized ForEach wrapper for this test.
-	forEachPod := func(selectorKey string, selectorValue string, fn func(api.Pod)) {
-		f.NewClusterVerification(
+
+	// Reusable cluster state function. This won't be adversly affected by lazy initialization of framework.
+	clusterState := func(selectorKey string, selectorValue string) *framework.ClusterVerification {
+		return f.NewClusterVerification(
 			framework.PodStateVerification{
 				Selectors:   map[string]string{selectorKey: selectorValue},
 				ValidPhases: []api.PodPhase{api.PodRunning},
-			}).ForEach(fn)
+			})
+	}
+	// Customized ForEach wrapper for this test.
+	forEachPod := func(selectorKey string, selectorValue string, fn func(api.Pod)) {
+		clusterState(selectorKey, selectorValue).ForEach(fn)
 	}
 	var c *client.Client
 	var ns string
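
For context, a minimal self-contained Go sketch of the closure behaviour that the new "Reusable cluster state function" comment relies on. The types below are simplified stand-ins, not the real e2e framework API; the point is that clusterState only dereferences the framework object when it is finally called, so defining it before the framework's BeforeEach-style lazy initialization is safe.

package main

import "fmt"

// Framework and ClusterVerification are simplified stand-ins for the e2e types.
type Framework struct{ namespace string }

type ClusterVerification struct{ selector map[string]string }

func (f *Framework) NewClusterVerification(selector map[string]string) *ClusterVerification {
	// f.namespace is read only here, at call time, after setup has run.
	fmt.Println("verifying pods in namespace", f.namespace)
	return &ClusterVerification{selector: selector}
}

func main() {
	f := &Framework{} // namespace not populated yet, like a lazily initialized framework

	// Defined before setup, but it captures f by reference and defers using it.
	clusterState := func(selectorKey, selectorValue string) *ClusterVerification {
		return f.NewClusterVerification(map[string]string{selectorKey: selectorValue})
	}

	f.namespace = "examples-1234" // setup happens later (BeforeEach in the real test)

	cv := clusterState("name", "redis")
	fmt.Println("selector:", cv.selector)
}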
@@ -86,7 +91,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			framework.RunKubectlOrDie("create", "-f", sentinelServiceYaml, nsFlag)
 			framework.RunKubectlOrDie("create", "-f", sentinelControllerYaml, nsFlag)
 			framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
-			label := labels.SelectorFromSet(labels.Set(map[string]string{"name": sentinelRC}))
+			label := labels.SelectorFromSet(labels.Set(map[string]string{sentinelRC: "true"}))
 			err = framework.WaitForPodsWithLabelRunning(c, ns, label)
 			Expect(err).NotTo(HaveOccurred())
 			label = labels.SelectorFromSet(labels.Set(map[string]string{"name": redisRC}))
@@ -101,13 +106,21 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 
 			By("checking up the services")
 			checkAllLogs := func() {
-				forEachPod("name", "redis", func(pod api.Pod) {
+				selectorKey, selectorValue := "name", redisRC
+				label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
+				err = framework.WaitForPodsWithLabelRunning(c, ns, label)
+				Expect(err).NotTo(HaveOccurred())
+				forEachPod(selectorKey, selectorValue, func(pod api.Pod) {
 					if pod.Name != bootstrapPodName {
 						_, err := framework.LookForStringInLog(ns, pod.Name, "redis", expectedOnServer, serverStartTimeout)
 						Expect(err).NotTo(HaveOccurred())
 					}
 				})
-				forEachPod("name", "redis-sentinel", func(pod api.Pod) {
+				selectorKey, selectorValue = sentinelRC, "true"
+				label = labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
+				err = framework.WaitForPodsWithLabelRunning(c, ns, label)
+				Expect(err).NotTo(HaveOccurred())
+				forEachPod(selectorKey, selectorValue, func(pod api.Pod) {
 					if pod.Name != bootstrapPodName {
 						_, err := framework.LookForStringInLog(ns, pod.Name, "sentinel", expectedOnSentinel, serverStartTimeout)
 						Expect(err).NotTo(HaveOccurred())
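
For reference, a small standalone sketch of the selector switch above. It assumes the test's sentinelRC constant resolves to "redis-sentinel", uses today's k8s.io/apimachinery/pkg/labels import path rather than the 2016-era k8s.io/kubernetes/pkg/labels one (so it needs the apimachinery module), and takes the pod labels from the redis example manifests touched by this PR.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Old selector used by the test: only pods labeled name=redis-sentinel match.
	oldSel := labels.SelectorFromSet(labels.Set{"name": "redis-sentinel"})
	// New selector: every pod labeled redis-sentinel=true matches, which in the
	// redis example also covers the bootstrap redis-master pod.
	newSel := labels.SelectorFromSet(labels.Set{"redis-sentinel": "true"})

	// Labels carried by the bootstrap master pod (per examples/redis/redis-master.yaml).
	masterPod := labels.Set{"name": "redis", "redis-sentinel": "true", "role": "master"}

	fmt.Println("old selector matches master pod:", oldSel.Matches(masterPod)) // false
	fmt.Println("new selector matches master pod:", newSel.Matches(masterPod)) // true
}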
@@ -182,11 +195,6 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 				return filepath.Join(framework.TestContext.RepoRoot, "examples", "spark", file)
 			}
 
-			// Override test-generated namespace to be as specified in Spark example
-			ns = "spark-cluster"
-			namespaceYaml := mkpath("namespace-spark-cluster.yaml")
-			framework.RunKubectlOrDie("create", "-f", namespaceYaml)
-
 			// TODO: Add Zepplin and Web UI to this example.
 			serviceYaml := mkpath("spark-master-service.yaml")
 			masterYaml := mkpath("spark-master-controller.yaml")
@@ -197,14 +205,14 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			By("starting master")
 			framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
 			framework.RunKubectlOrDie("create", "-f", masterYaml, nsFlag)
-			label := labels.SelectorFromSet(labels.Set(map[string]string{"component": "spark-master"}))
+			selectorKey, selectorValue := "component", "spark-master"
+			label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
 			err := framework.WaitForPodsWithLabelRunning(c, ns, label)
 			Expect(err).NotTo(HaveOccurred())
 
 			framework.Logf("Now polling for Master startup...")
 
 			// Only one master pod: But its a natural way to look up pod names.
-			forEachPod("component", "spark-master", func(pod api.Pod) {
+			forEachPod(selectorKey, selectorValue, func(pod api.Pod) {
 				framework.Logf("Now waiting for master to startup in %v", pod.Name)
 				_, err := framework.LookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
 				Expect(err).NotTo(HaveOccurred())
@@ -213,7 +221,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			By("waiting for master endpoint")
 			err = framework.WaitForEndpoint(c, ns, "spark-master")
 			Expect(err).NotTo(HaveOccurred())
-			forEachPod("component", "spark-master", func(pod api.Pod) {
+			forEachPod(selectorKey, selectorValue, func(pod api.Pod) {
 				_, maErr := framework.LookForStringInLog(f.Namespace.Name, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
 				if maErr != nil {
 					framework.Failf("Didn't find target string. error:", maErr)
@@ -224,17 +232,16 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			By("starting workers")
 			framework.Logf("Now starting Workers")
 			framework.RunKubectlOrDie("create", "-f", workerControllerYaml, nsFlag)
-			label := labels.SelectorFromSet(labels.Set(map[string]string{"component": "spark-worker"}))
+			selectorKey, selectorValue := "component", "spark-worker"
+			label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
 			err := framework.WaitForPodsWithLabelRunning(c, ns, label)
 			Expect(err).NotTo(HaveOccurred())
 
 			// For now, scaling is orthogonal to the core test.
-			// framework.ScaleRC(c, ns, "spark-worker-controller", 2, true)
-
-			framework.Logf("Now polling for worker startup...")
-			forEachPod("component", "spark-worker",
+			// ScaleRC(c, ns, "spark-worker-controller", 2, true)
+			framework.Logf("Now polling for worker startup...")
+			forEachPod(selectorKey, selectorValue,
 				func(pod api.Pod) {
 					_, slaveErr := framework.LookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout)
 					Expect(slaveErr).NotTo(HaveOccurred())
test/e2e/framework/util.go (1 addition, 1 deletion)

@@ -3913,7 +3913,7 @@ func WaitForIngressAddress(c *client.Client, ns, ingName string, timeout time.Du
 // Looks for the given string in the log of a specific pod container
 func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
 	return LookForString(expectedString, timeout, func() string {
-		return RunKubectlOrDie("log", podName, container, fmt.Sprintf("--namespace=%v", ns))
+		return RunKubectlOrDie("logs", podName, container, fmt.Sprintf("--namespace=%v", ns))
 	})
 }
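
The one-word change above switches the helper from the older "kubectl log" spelling to "kubectl logs". A rough standalone sketch of the command the patched helper shells out to, assuming kubectl is on PATH and using placeholder pod, container, and namespace names:

package main

import (
	"fmt"
	"os/exec"
)

// fetchContainerLog mirrors the command built by LookForStringInLog after this PR:
// kubectl logs <pod> <container> --namespace=<ns>
func fetchContainerLog(ns, pod, container string) (string, error) {
	out, err := exec.Command("kubectl", "logs", pod, container,
		fmt.Sprintf("--namespace=%v", ns)).CombinedOutput()
	return string(out), err
}

func main() {
	out, err := fetchContainerLog("default", "redis-master", "master")
	if err != nil {
		fmt.Println("kubectl logs failed:", err)
		return
	}
	fmt.Println(out)
}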