forked from openshift/origin
-
Notifications
You must be signed in to change notification settings - Fork 0
/
scl.go
146 lines (126 loc) · 4.99 KB
/
scl.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
package image_ecosystem
import (
"fmt"
"strings"
"time"
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"
kapiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/client/conditions"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/origin/test/extended/util"
)
// getPodNameForTest builds the deterministic pod name used by the SCL tests:
// the image name and test-case version joined with a "-centos7" suffix.
func getPodNameForTest(image string, t tc) string {
	return image + "-" + t.Version + "-centos7"
}
// defineTest will create the gingko test. This ensures the test
// is created with a local copy of all variables the test will need,
// since the test may not run immediately and may run in parallel with other
// tests, so sharing a variable reference is problematic. (Sharing the oc client
// is ok for these tests).
//
// Two specs are registered per test case:
//  1. the image, run with its default entrypoint, prints its s2i usage text;
//  2. the image is SCL-enabled: t.Cmd produces t.Expected both as the pod
//     command and via `oc exec` with login and non-login shells.
func defineTest(image string, t tc, oc *exutil.CLI) {
	g.Describe("returning s2i usage when running the image", func() {
		g.It(fmt.Sprintf("%q should print the usage", t.DockerImageReference), func() {
			g.By(fmt.Sprintf("creating a sample pod for %q", t.DockerImageReference))
			pod := exutil.GetPodForContainer(kapiv1.Container{
				Name:  "test",
				Image: t.DockerImageReference,
			})
			_, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Create(pod)
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By("waiting for the pod to be running")
			err = oc.KubeFramework().WaitForPodRunningSlow(pod.Name)
			if err != nil {
				// The usage pod may exit before the wait sees it running;
				// a completed pod is acceptable, anything else fails the spec.
				p, e := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(pod.Name, metav1.GetOptions{})
				// BUGFIX: format arguments were swapped (pod printed as the
				// error and vice versa) and the format had a dangling ": ".
				e2e.Logf("error waiting for pod %v: %v (get pod: %v, %v)", pod.Name, err, p, e)
				o.Expect(err).To(o.Equal(conditions.ErrPodCompleted))
			}
			g.By("checking the log of the pod")
			// Poll the pod log until the usage banner appears; logs may lag
			// briefly behind container start.
			err = wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
				log, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).GetLogs(pod.Name, &kapiv1.PodLogOptions{}).DoRaw()
				if err != nil {
					return false, err
				}
				e2e.Logf("got log %v from pod %v", string(log), pod.Name)
				if strings.Contains(string(log), "Sample invocation") {
					return true, nil
				}
				return false, nil
			})
			o.Expect(err).NotTo(o.HaveOccurred())
		})
	})
	g.Describe("using the SCL in s2i images", func() {
		g.It(fmt.Sprintf("%q should be SCL enabled", t.DockerImageReference), func() {
			g.By(fmt.Sprintf("creating a sample pod for %q with /bin/bash -c command", t.DockerImageReference))
			pod := exutil.GetPodForContainer(kapiv1.Container{
				Image:   t.DockerImageReference,
				Name:    "test",
				Command: []string{"/bin/bash", "-c", t.Cmd},
			})
			_, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Create(pod)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.KubeFramework().WaitForPodRunningSlow(pod.Name)
			if err != nil {
				// Same tolerance as above: a pod that already completed is fine.
				p, e := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(pod.Name, metav1.GetOptions{})
				// BUGFIX: format arguments were swapped here as well.
				e2e.Logf("error waiting for pod %v: %v (get pod: %v, %v)", pod.Name, err, p, e)
				o.Expect(err).To(o.Equal(conditions.ErrPodCompleted))
			}
			g.By("checking the log of the pod")
			err = wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
				log, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).GetLogs(pod.Name, &kapiv1.PodLogOptions{}).DoRaw()
				if err != nil {
					return false, err
				}
				e2e.Logf("got log %v from pod %v", string(log), pod.Name)
				if strings.Contains(string(log), t.Expected) {
					return true, nil
				}
				return false, nil
			})
			o.Expect(err).NotTo(o.HaveOccurred())
			// Second pod: keep it alive with sleep so the same command can be
			// exercised via `oc exec` in both login and interactive shells.
			g.By(fmt.Sprintf("creating a sample pod for %q", t.DockerImageReference))
			pod = exutil.GetPodForContainer(kapiv1.Container{
				Image:   t.DockerImageReference,
				Name:    "test",
				Command: []string{"/usr/bin/sleep", "infinity"},
			})
			_, err = oc.KubeClient().CoreV1().Pods(oc.Namespace()).Create(pod)
			o.Expect(err).NotTo(o.HaveOccurred())
			err = oc.KubeFramework().WaitForPodRunningSlow(pod.Name)
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By("calling the binary using 'oc exec /bin/bash -c'")
			out, err := oc.Run("exec").Args("-p", pod.Name, "--", "/bin/bash", "-c", t.Cmd).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).Should(o.ContainSubstring(t.Expected))
			g.By("calling the binary using 'oc exec /bin/sh -ic'")
			out, err = oc.Run("exec").Args("-p", pod.Name, "--", "/bin/sh", "-ic", t.Cmd).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(out).Should(o.ContainSubstring(t.Expected))
		})
	})
}
// Top-level spec registration: for every image test case returned by
// GetTestCaseForImages, register the two specs created by defineTest.
// Registration happens at package init; the specs run later under Ginkgo.
var _ = g.Describe("[image_ecosystem][Slow] openshift images should be SCL enabled", func() {
	defer g.GinkgoRecover()
	var oc = exutil.NewCLI("s2i-usage", exutil.KubeConfigPath())
	g.Context("", func() {
		g.JustBeforeEach(func() {
			// Pods cannot be created until the namespace's builder service
			// account exists, so gate every spec on it.
			g.By("waiting for builder service account")
			err := exutil.WaitForBuilderAccount(oc.KubeClient().CoreV1().ServiceAccounts(oc.Namespace()))
			o.Expect(err).NotTo(o.HaveOccurred())
		})
		g.AfterEach(func() {
			// On failure, dump pod states and logs to aid debugging.
			if g.CurrentGinkgoTestDescription().Failed {
				exutil.DumpPodStates(oc)
				exutil.DumpPodLogsStartingWith("", oc)
			}
		})
		// Each (image, test case) pair gets its own specs; defineTest copies
		// its arguments so parallel/deferred execution is safe.
		for image, tcs := range GetTestCaseForImages() {
			for _, t := range tcs {
				defineTest(image, t, oc)
			}
		}
	})
})