-
Notifications
You must be signed in to change notification settings - Fork 74
/
e2e_test.go
179 lines (153 loc) · 6.73 KB
/
e2e_test.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
// Copyright 2021 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// nolint: testpackage
package e2e
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
)
// creating a workload cluster
// This test is meant to provide a first, fast signal to detect regression; it is recommended to use it as a PR blocker test.
// This spec brings up two BYO (bring-your-own) Docker hosts, starts a host
// agent on each, and then creates a workload cluster that uses them.
// It is meant to provide a first, fast signal to detect regression; it is
// recommended to use it as a PR blocker test.
var _ = Describe("When BYOH joins existing cluster [PR-Blocking]", func() {
	var (
		ctx              context.Context
		specName         = "quick-start"
		namespace        *corev1.Namespace
		clusterName      string
		cancelWatches    context.CancelFunc
		clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
		dockerClient     *client.Client
		err              error
		// IDs of the Docker containers acting as BYO hosts; they are
		// stopped and removed in AfterEach.
		byohostContainerIDs []string
		agentLogFile1       = "/tmp/host-agent1.log"
		agentLogFile2       = "/tmp/host-agent2.log"
		byoHostName1        = "byohost1"
		byoHostName2        = "byohost2"
	)

	BeforeEach(func() {
		ctx = context.TODO()
		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
		Expect(e2eConfig).NotTo(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
		Expect(clusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. clusterctlConfigPath must be an existing file when calling %s spec", specName)
		Expect(bootstrapClusterProxy).NotTo(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling %s spec", specName)
		Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid argument. artifactFolder can't be created for %s spec", specName)
		Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion))

		// set up a Namespace where to host objects for this spec and create a watcher for the namespace events.
		namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder)
		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
	})

	// NOTE(review): the spec description says "single BYOH host" but two
	// hosts are started below — confirm whether the text should be updated.
	// (Left unchanged here because the spec name is part of test identity.)
	It("Should create a workload cluster with single BYOH host", func() {
		clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
		dockerClient, err = client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
		Expect(err).NotTo(HaveOccurred())

		runner := ByoHostRunner{
			Context:               ctx,
			clusterConName:        clusterConName,
			Namespace:             namespace.Name,
			PathToHostAgentBinary: pathToHostAgentBinary,
			DockerClient:          dockerClient,
			NetworkInterface:      "kind",
			bootstrapClusterProxy: bootstrapClusterProxy,
			CommandArgs: map[string]string{
				"--bootstrap-kubeconfig": "/bootstrap.conf",
				"--namespace":            namespace.Name,
				"--v":                    "1",
			},
		}

		// --- first BYO host ---
		runner.ByoHostName = byoHostName1
		runner.BootstrapKubeconfigData = generateBootstrapKubeconfig(runner.Context, bootstrapClusterProxy, clusterConName)
		byohost, err := runner.SetupByoDockerHost()
		Expect(err).NotTo(HaveOccurred())
		// Use one variable per host: HijackedResponse.Close has a pointer
		// receiver, so reusing a single reassigned `output` variable would
		// make BOTH deferred Closes act on the last response and leak the
		// first hijacked connection.
		output1, byohostContainerID, err := runner.ExecByoDockerHost(byohost)
		Expect(err).NotTo(HaveOccurred())
		defer output1.Close()
		byohostContainerIDs = append(byohostContainerIDs, byohostContainerID)
		// read the log of host agent container in backend, and write it
		f1 := WriteDockerLog(output1, agentLogFile1)
		defer func() {
			if deferredErr := f1.Close(); deferredErr != nil {
				Showf("error closing file %s: %v", agentLogFile1, deferredErr)
			}
		}()

		// --- second BYO host ---
		runner.ByoHostName = byoHostName2
		runner.BootstrapKubeconfigData = generateBootstrapKubeconfig(runner.Context, bootstrapClusterProxy, clusterConName)
		byohost, err = runner.SetupByoDockerHost()
		Expect(err).NotTo(HaveOccurred())
		output2, byohostContainerID2, err := runner.ExecByoDockerHost(byohost)
		Expect(err).NotTo(HaveOccurred())
		defer output2.Close()
		byohostContainerIDs = append(byohostContainerIDs, byohostContainerID2)
		// read the log of host agent container in backend, and write it.
		// A distinct file handle per host fixes the original's shared-`f`
		// closure capture, which closed the second handle twice and never
		// closed the first.
		f2 := WriteDockerLog(output2, agentLogFile2)
		defer func() {
			if deferredErr := f2.Close(); deferredErr != nil {
				Showf("error closing file %s: %v", agentLogFile2, deferredErr)
			}
		}()

		setControlPlaneIP(context.Background(), dockerClient)

		clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
			ClusterProxy: bootstrapClusterProxy,
			ConfigCluster: clusterctl.ConfigClusterInput{
				LogFolder:                filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
				ClusterctlConfigPath:     clusterctlConfigPath,
				KubeconfigPath:           bootstrapClusterProxy.GetKubeconfigPath(),
				InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
				Flavor:                   clusterctl.DefaultFlavor,
				Namespace:                namespace.Name,
				ClusterName:              clusterName,
				KubernetesVersion:        e2eConfig.GetVariable(KubernetesVersion),
				ControlPlaneMachineCount: pointer.Int64(1),
				WorkerMachineCount:       pointer.Int64(1),
			},
			WaitForClusterIntervals:      e2eConfig.GetIntervals(specName, "wait-cluster"),
			WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
			WaitForMachineDeployments:    e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
		}, clusterResources)
	})

	JustAfterEach(func() {
		// On failure, dump the host-agent logs to aid debugging.
		// CurrentSpecReport() is the Ginkgo v2 replacement for the
		// deprecated v1 CurrentGinkgoTestDescription().
		if CurrentSpecReport().Failed() {
			ShowInfo([]string{agentLogFile1, agentLogFile2})
		}
	})

	AfterEach(func() {
		// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
		dumpSpecResourcesAndCleanup(ctx, specName, bootstrapClusterProxy, artifactFolder, namespace, cancelWatches, clusterResources.Cluster, e2eConfig.GetIntervals, skipCleanup)

		// Stop and remove every BYO host container the spec started.
		if dockerClient != nil && len(byohostContainerIDs) != 0 {
			for _, byohostContainerID := range byohostContainerIDs {
				err := dockerClient.ContainerStop(ctx, byohostContainerID, container.StopOptions{})
				Expect(err).NotTo(HaveOccurred())

				err = dockerClient.ContainerRemove(ctx, byohostContainerID, types.ContainerRemoveOptions{})
				Expect(err).NotTo(HaveOccurred())
			}
		}

		// Best-effort removal of log/scratch files; failures are only logged.
		err := os.Remove(agentLogFile1)
		if err != nil {
			Showf("error removing file %s: %v", agentLogFile1, err)
		}
		err = os.Remove(agentLogFile2)
		if err != nil {
			Showf("error removing file %s: %v", agentLogFile2, err)
		}
		err = os.Remove(ReadByohControllerManagerLogShellFile)
		if err != nil {
			Showf("error removing file %s: %v", ReadByohControllerManagerLogShellFile, err)
		}
		err = os.Remove(ReadAllPodsShellFile)
		if err != nil {
			Showf("error removing file %s: %v", ReadAllPodsShellFile, err)
		}
	})
})