contextdir.go
package builds

import (
	"fmt"
	"time"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	e2e "k8s.io/kubernetes/test/e2e/framework"

	imageeco "github.com/openshift/origin/test/extended/image_ecosystem"
	exutil "github.com/openshift/origin/test/extended/util"
)

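// This suite verifies that builds honor a build config's contextDir: both the s2i and docker
// strategies should build the application from the configured subdirectory of the source
// repository rather than from its root.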
var _ = g.Describe("[Feature:Builds][Slow] builds with a context directory", func() {
	defer g.GinkgoRecover()
	var (
		appFixture            = exutil.FixturePath("testdata", "builds", "test-context-build.json")
		oc                    = exutil.NewCLI("contextdir", exutil.KubeConfigPath())
		s2iBuildConfigName    = "s2icontext"
		s2iBuildName          = "s2icontext-1"
		dcName                = "frontend"
		deploymentName        = "frontend-1"
		dcLabel               = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", deploymentName))
		serviceName           = "frontend"
		dockerBuildConfigName = "dockercontext"
		dockerBuildName       = "dockercontext-1"
	)

	g.Context("", func() {
		g.BeforeEach(func() {
			exutil.DumpDockerInfo()
		})

		g.AfterEach(func() {
			if g.CurrentGinkgoTestDescription().Failed {
				exutil.DumpPodStates(oc)
				exutil.DumpPodLogsStartingWith("", oc)
			}
		})

		g.Describe("s2i context directory build", func() {
			g.It("should s2i build an application using a context directory", func() {
				exutil.CheckOpenShiftNamespaceImageStreams(oc)

				g.By(fmt.Sprintf("calling oc create -f %q", appFixture))
				err := oc.Run("create").Args("-f", appFixture).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("starting a build")
				err = oc.Run("start-build").Args(s2iBuildConfigName).Execute()
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("waiting for build to finish")
				err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), s2iBuildName, exutil.CheckBuildSuccess, exutil.CheckBuildFailed, nil)
				if err != nil {
					exutil.DumpBuildLogs("s2icontext", oc)
				}
				o.Expect(err).NotTo(o.HaveOccurred())

				// Waiting for the endpoint alone can block for a very long time, so first wait for the
				// deployment via WaitForDeploymentConfig, which does have a timeout; in most cases a
				// service that never comes up stems from a failed deployment.
				g.By("waiting for a deployment")
				err = exutil.WaitForDeploymentConfig(oc.KubeClient(), oc.AppsClient().AppsV1(), oc.Namespace(), dcName, 1, true, oc)
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("waiting for endpoint")
				err = e2e.WaitForEndpoint(oc.KubeFramework().ClientSet, oc.Namespace(), serviceName)
				o.Expect(err).NotTo(o.HaveOccurred())

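				// assertPageContent waits for a running frontend pod and verifies that the page served
				// by the frontend service contains the given content.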
				assertPageContent := func(content string) {
					_, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunning, 1, 2*time.Minute)
					o.Expect(err).NotTo(o.HaveOccurred())

					result, err := imageeco.CheckPageContains(oc, "frontend", "", content)
					o.Expect(err).NotTo(o.HaveOccurred())
					o.Expect(result).To(o.BeTrue())
				}

				g.By("testing application content")
				assertPageContent("Hello world!")

				g.By("checking the pod count")
				pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(metav1.ListOptions{LabelSelector: dcLabel.String()})
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(len(pods.Items)).To(o.Equal(1))

				// With the context directory honored, only its contents end up in the application source
				// directory, so the repository's "2.3" subdirectory must not appear there.
				g.By("expecting the pod not to contain two copies of the source")
				pod := pods.Items[0]
				out, err := oc.Run("exec").Args(pod.Name, "-c", pod.Spec.Containers[0].Name, "--", "ls", "/opt/app-root/src").Output()
				o.Expect(err).NotTo(o.HaveOccurred())
				o.Expect(out).NotTo(o.ContainSubstring("2.3"))
			})
		})

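		// The docker strategy variant seeds a local git repository whose only Dockerfile lives under
		// 2.3/ and passes it to the build with --from-repo.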
g.Describe("docker context directory build", func() {
g.It(fmt.Sprintf("should docker build an application using a context directory"), func() {
g.By("initializing local repo")
repo, err := exutil.NewGitRepo("contextdir")
o.Expect(err).NotTo(o.HaveOccurred())
defer repo.Remove()
err = repo.AddAndCommit("2.3/Dockerfile", "FROM busybox")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.CheckOpenShiftNamespaceImageStreams(oc)
g.By(fmt.Sprintf("calling oc create -f %q", appFixture))
err = oc.Run("create").Args("-f", appFixture).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("starting a build")
err = oc.Run("start-build").Args(dockerBuildConfigName, "--from-repo", repo.RepoPath).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// build will fail if we don't use the right context dir because there won't be a dockerfile present.
g.By("waiting for build to finish")
err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), dockerBuildName, exutil.CheckBuildSuccess, exutil.CheckBuildFailed, nil)
if err != nil {
exutil.DumpBuildLogs("dockercontext", oc)
}
o.Expect(err).NotTo(o.HaveOccurred())
})
})
})
})