forked from openshift/origin
/
multistage.go
121 lines (109 loc) · 3.88 KB
/
multistage.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
package builds
import (
"context"
"fmt"
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
buildv1 "github.com/openshift/api/build/v1"
eximages "github.com/openshift/origin/test/extended/images"
exutil "github.com/openshift/origin/test/extended/util"
)
// This spec exercises Docker multi-stage image builds: the "scratch" base of
// the first stage is substituted with centos:7 via BuildSource.Images, files
// are copied between stages and from external images (busybox), and the
// resulting image is run to confirm the copied binaries are present.
var _ = g.Describe("[sig-builds][Feature:Builds] Multi-stage image builds", func() {
	defer g.GinkgoRecover()
	var (
		oc = exutil.NewCLI("build-multistage")
		// The first stage claims FROM scratch, but the build below remaps
		// "scratch" to centos:7, so /usr/bin/curl is expected to exist there.
		testDockerfile = `
FROM scratch as test
USER 1001
FROM centos:7
COPY --from=test /usr/bin/curl /test/
COPY --from=busybox:latest /bin/echo /test/
COPY --from=busybox:latest /bin/ping /test/
`
	)
	g.Context("", func() {
		g.AfterEach(func() {
			// Dump cluster state to aid debugging when the spec fails.
			if g.CurrentGinkgoTestDescription().Failed {
				exutil.DumpPodStates(oc)
				exutil.DumpConfigMapStates(oc)
				exutil.DumpPodLogsStartingWith("", oc)
			}
		})
		g.It("should succeed", func() {
			g.By("creating a build directly")
			registryURL, err := eximages.GetDockerRegistryURL(oc)
			o.Expect(err).NotTo(o.HaveOccurred())
			build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Create(context.Background(), &buildv1.Build{
				ObjectMeta: metav1.ObjectMeta{
					Name: "multi-stage",
				},
				Spec: buildv1.BuildSpec{
					CommonSpec: buildv1.CommonSpec{
						Source: buildv1.BuildSource{
							Dockerfile: &testDockerfile,
							Images: []buildv1.ImageSource{
								// Substitute centos:7 for the "scratch" stage base.
								{From: corev1.ObjectReference{Kind: "DockerImage", Name: "centos:7"}, As: []string{"scratch"}},
							},
						},
						Strategy: buildv1.BuildStrategy{
							DockerStrategy: &buildv1.DockerBuildStrategy{},
						},
						Output: buildv1.BuildOutput{
							To: &corev1.ObjectReference{
								Kind: "DockerImage",
								Name: fmt.Sprintf("%s/%s/multi-stage:v1", registryURL, oc.Namespace()),
							},
						},
					},
				},
			}, metav1.CreateOptions{})
			o.Expect(err).NotTo(o.HaveOccurred())
			result := exutil.NewBuildResult(oc, build)
			err = exutil.WaitForBuildResult(oc.AdminBuildClient().BuildV1().Builds(oc.Namespace()), result)
			o.Expect(err).NotTo(o.HaveOccurred())
			// Only the existence of the build pod is verified; the object
			// itself is not used (was a dead store into `pod`).
			_, err = oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(context.Background(), build.Name+"-build", metav1.GetOptions{})
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(result.BuildSuccess).To(o.BeTrue(), "Build did not succeed: %#v", result)
			s, err := result.Logs()
			o.Expect(err).NotTo(o.HaveOccurred())
			// Because of the stage substitution, the log must never show the
			// raw "scratch"/"busybox" names, only the remapped centos:7.
			o.Expect(s).ToNot(o.ContainSubstring("--> FROM scratch"))
			o.Expect(s).ToNot(o.ContainSubstring("FROM busybox"))
			o.Expect(s).To(o.ContainSubstring("STEP 1: FROM centos:7 AS test"))
			o.Expect(s).To(o.ContainSubstring("COPY --from"))
			o.Expect(s).To(o.ContainSubstring(fmt.Sprintf("\"OPENSHIFT_BUILD_NAMESPACE\"=\"%s\"", oc.Namespace())))
			// NOTE(review): this logs `result` (the BuildResult), not the log
			// text `s` fetched above — confirm whether `s` was intended.
			e2e.Logf("Build logs:\n%s", result)
			c := oc.KubeFramework().PodClient()
			// Run the built image: the "run" container proves curl works
			// against the API server; "check" lists the copied binaries.
			pod := c.Create(&corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test",
				},
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{
						{
							Name:    "run",
							Image:   fmt.Sprintf("%s/%s/multi-stage:v1", registryURL, oc.Namespace()),
							Command: []string{"/test/curl", "-k", "https://kubernetes.default.svc"},
						},
						{
							Name:    "check",
							Image:   fmt.Sprintf("%s/%s/multi-stage:v1", registryURL, oc.Namespace()),
							Command: []string{"ls", "/test/"},
						},
					},
				},
			})
			c.WaitForSuccess(pod.Name, e2e.PodStartTimeout)
			data, err := oc.Run("logs").Args("-f", "test", "-c", "run").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			m, err := oc.Run("logs").Args("-f", "test", "-c", "check").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			o.Expect(m).To(o.ContainSubstring("echo"))
			o.Expect(m).To(o.ContainSubstring("ping"))
			// data and m are already strings; the previous string(...)
			// conversions were redundant.
			e2e.Logf("Pod logs:\n%s\n%s", data, m)
		})
	})
})