package cluster

import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
kclientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch"
reale2e "k8s.io/kubernetes/test/e2e"
e2e "k8s.io/kubernetes/test/e2e/framework"
"github.com/openshift/openshift-tests/test/extended/cluster/metrics"
"github.com/openshift/openshift-tests/test/extended/util"
exutil "github.com/openshift/openshift-tests/test/extended/util"
)
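// cl.go implements the "Load cluster" performance test: it reads a Cluster
// Loader config (the built-in master-vert.yaml fixture or a file supplied via
// the viper config flag), creates the projects, configmaps, secrets, templates,
// and pods it describes (optionally throttled by tuning sets), then waits for
// the created objects to become healthy and records step and test durations.
//
// A minimal sketch of the config shape consumed below, inferred from the
// fields this file references (key names are illustrative; the authoritative
// schema is the ClusterLoader config types consumed by ParseConfig):
//
//   clusterloader:
//     cleanup: true
//     projects:
//     - number: 2
//       basename: clusterproject
//       tuning: default
//       ifexists: reuse
//       templates: [...]
//       pods: [...]
//     tuningsets:
//     - name: default
//       pods:
//         stepping: {stepsize: 5, pause: 10s}
//         ratelimit: {delay: 250ms}
//     sync:
//       running: true
//       selectors: {purpose: test}
//       timeout: 10m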
const checkDeleteProjectInterval = 10 * time.Second
const checkDeleteProjectTimeout = 3 * time.Minute
const checkPodRunningTimeout = 5 * time.Minute
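// checkDeleteProjectInterval and checkDeleteProjectTimeout bound the polling
// loop used by DeleteProject; checkPodRunningTimeout bounds the RC and pod
// waits in postCreateWait.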
// TODO sjug: pass label via config
var podLabelMap = map[string]string{"purpose": "test"}
var rootDir string
var _ = g.Describe("[Feature:Performance][Serial][Slow] Load cluster", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLIWithoutNamespace("cl")
masterVertFixture = exutil.FixturePath("testdata", "cluster", "master-vert.yaml")
_ = exutil.FixturePath("testdata", "cluster", "quickstarts", "cakephp-mysql.json")
_ = exutil.FixturePath("testdata", "cluster", "quickstarts", "dancer-mysql.json")
_ = exutil.FixturePath("testdata", "cluster", "quickstarts", "django-postgresql.json")
_ = exutil.FixturePath("testdata", "cluster", "quickstarts", "nodejs-mongodb.json")
_ = exutil.FixturePath("testdata", "cluster", "quickstarts", "rails-postgresql.json")
)
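// The blank-assigned FixturePath calls above are evaluated for their side
// effect: FixturePath stages the referenced testdata on disk, which (it is
// assumed here) makes the quickstart templates resolvable when a config
// refers to them by relative path.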
var c kclientset.Interface
g.BeforeEach(func() {
var err error
c = oc.AdminKubeClient()
viperConfig := reale2e.GetViperConfig()
if viperConfig == "" {
e2e.Logf("Undefined config file, using built-in config %v\n", masterVertFixture)
reale2e.SetViperConfig(masterVertFixture)
path := strings.Split(masterVertFixture, "/")
rootDir = strings.Join(path[:len(path)-5], "/")
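// rootDir is the base directory obtained by stripping the last five path
// components from the staged fixture path; the exact depth is tied to the
// layout FixturePath produces (an assumption here, since rootDir is consumed
// elsewhere in the package).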
err = ParseConfig(masterVertFixture, true)
} else {
if _, err := os.Stat(viperConfig); os.IsNotExist(err) {
e2e.Failf("Config file not found: \"%v\"\n", err)
}
e2e.Logf("Using config \"%v\"\n", viperConfig)
err = ParseConfig(viperConfig, false)
}
if err != nil {
e2e.Failf("Error parsing config: %v\n", err)
}
})
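// The test walks every project definition in the config: it creates (or
// reuses) the namespace, applies labels, creates configmaps, secrets,
// templates, and pods (rate-limited per the project's tuning set), then runs
// the optional sync phase and post-creation health checks before logging
// per-step and total durations.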
g.It("should load the cluster", func() {
project := ConfigContext.ClusterLoader.Projects
tuningSets := ConfigContext.ClusterLoader.TuningSets
sync := ConfigContext.ClusterLoader.Sync
if project == nil {
e2e.Failf("Invalid config file.\nFile: %v", project)
}
var namespaces []string
var steps []metrics.StepDuration
//totalPods := 0 // Keep track of how many pods for stepping
// TODO sjug: add concurrency
testStartTime := time.Now()
for _, p := range project {
// Find tuning if we have it
tuning := GetTuningSet(tuningSets, p.Tuning)
if tuning != nil {
e2e.Logf("Our tuning set is: %v", tuning)
}
for j := 0; j < p.Number; j++ {
var allArgs []string
if p.NodeSelector != "" {
allArgs = append(allArgs, "--node-selector")
allArgs = append(allArgs, p.NodeSelector)
}
nsName := fmt.Sprintf("%s%d", p.Basename, j)
allArgs = append(allArgs, nsName)
projectExists, err := ProjectExists(oc, nsName)
o.Expect(err).NotTo(o.HaveOccurred())
if !projectExists {
e2e.Logf("Project %s does not exist.", nsName)
}
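// Honor the project's ifexists policy: "reuse" keeps an existing namespace,
// "delete" removes it so it can be recreated below; anything else fails the test.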
switch p.IfExists {
case IF_EXISTS_REUSE:
e2e.Logf("Configuration requested reuse of project %v", nsName)
case IF_EXISTS_DELETE:
e2e.Logf("Configuration requested deletion of project %v", nsName)
if projectExists {
err = DeleteProject(oc, nsName, checkDeleteProjectInterval, checkDeleteProjectTimeout)
o.Expect(err).NotTo(o.HaveOccurred())
}
default:
e2e.Failf("Unsupported ifexists value '%v' for project %v", p.IfExists, project)
}
if p.IfExists == IF_EXISTS_REUSE && projectExists {
// do nothing
} else {
// Create namespaces as defined in Cluster Loader config
err = oc.Run("adm", "new-project").Args(allArgs...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("%d/%d : Created new namespace: %v", j+1, p.Number, nsName)
}
// label namespace nsName
if p.Labels != nil {
_, err = SetNamespaceLabels(c, nsName, p.Labels)
o.Expect(err).NotTo(o.HaveOccurred())
}
namespaces = append(namespaces, nsName)
// Create config maps
if p.Configmaps != nil {
// Configmaps defined, create them
err := CreateConfigmaps(oc, c, nsName, p.Configmaps)
o.Expect(err).NotTo(o.HaveOccurred())
}
// Create secrets
if p.Secrets != nil {
// Secrets defined, create them
err := CreateSecrets(oc, c, nsName, p.Secrets)
o.Expect(err).NotTo(o.HaveOccurred())
}
// Create templates as defined
for _, template := range p.Templates {
var rateDelay, stepPause time.Duration
if tuning != nil {
if tuning.Templates.RateLimit.Delay != 0 {
rateDelay = tuning.Templates.RateLimit.Delay
}
if tuning.Templates.Stepping.Pause != 0 {
stepPause = tuning.Templates.Stepping.Pause
}
}
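// rateDelay throttles individual template creations and stepPause is applied
// between stepping batches; both default to zero (no throttling) when no
// tuning set applies.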
step := metrics.NewTemplateStepDuration(rateDelay, stepPause)
err := CreateTemplates(oc, c, nsName, reale2e.GetViperConfig(), template, tuning, &step)
o.Expect(err).NotTo(o.HaveOccurred())
steps = append(steps, step)
}
// Create the pods defined for this project
for _, pod := range p.Pods {
var path string
var err error
if pod.File != "" {
// Parse Pod file into struct
path, err = mkPath(pod.File, reale2e.GetViperConfig())
o.Expect(err).NotTo(o.HaveOccurred())
}
config, err := ParsePods(path)
o.Expect(err).NotTo(o.HaveOccurred())
// Check if environment variables are defined in CL config
if pod.Parameters == nil {
e2e.Logf("Pod environment variables will not be modified.")
} else {
// Override environment variables for Pod using ConfigMap
configMapName := InjectConfigMap(c, nsName, pod.Parameters, config)
// Cleanup ConfigMap at some point after the Pods are created
defer func() {
_ = c.CoreV1().ConfigMaps(nsName).Delete(configMapName, nil)
}()
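// Note: defers inside this loop run when the enclosing It body returns, not
// per iteration, so every injected ConfigMap is cleaned up together at the
// end of the test.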
}
// TODO sjug: pass label via config
podLabelMap := map[string]string{"purpose": "test"}
var rateDelay, stepPause time.Duration
if tuning != nil {
if tuning.Pods.RateLimit.Delay != 0 {
rateDelay = tuning.Pods.RateLimit.Delay
}
if tuning.Pods.Stepping.Pause != 0 {
stepPause = tuning.Pods.Stepping.Pause
}
}
step := metrics.NewPodStepDuration(rateDelay, stepPause)
err = pod.CreatePods(c, nsName, podLabelMap, config.Spec, tuning, &step)
steps = append(steps, step)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
}
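// Optional synchronization phase: wait for pods matching sync.Selectors in
// every created namespace to reach Running and/or Succeeded within
// sync.Timeout, and, if enabled, start the sync HTTP server on the
// configured port.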
if sync.Running {
timeout, err := time.ParseDuration(sync.Timeout)
o.Expect(err).NotTo(o.HaveOccurred())
for _, ns := range namespaces {
err := SyncRunningPods(c, ns, sync.Selectors, timeout)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
if sync.Server.Enabled {
var podCount PodCount
err := Server(&podCount, sync.Server.Port, false)
o.Expect(err).NotTo(o.HaveOccurred())
}
if sync.Succeeded {
timeout, err := time.ParseDuration(sync.Timeout)
o.Expect(err).NotTo(o.HaveOccurred())
for _, ns := range namespaces {
err := SyncSucceededPods(c, ns, sync.Selectors, timeout)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
if err := postCreateWait(oc, namespaces); err != nil {
e2e.Failf("Error in postCreateWait: %v", err)
}
// Calculate and log test duration
m := []metrics.Metrics{metrics.NewTestDuration("cluster-loader-test", testStartTime, time.Since(testStartTime), steps)}
err := metrics.LogMetrics(m)
o.Expect(err).NotTo(o.HaveOccurred())
// If config context set to cleanup on completion
if ConfigContext.ClusterLoader.Cleanup {
for _, ns := range namespaces {
e2e.Logf("Deleting project %s", ns)
err := oc.AsAdmin().KubeClient().CoreV1().Namespaces().Delete(ns, nil)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
})
})
// postCreateWait looks for RCs, pods, builds, and DCs to ensure they're in a good state in each namespace
func postCreateWait(oc *util.CLI, namespaces []string) error {
// For each namespace, wait for RCs to stabilize and become ready, for labeled pods to run, for the first build to complete, and for the first DeploymentConfig to roll out
for _, ns := range namespaces {
rcList, err := oc.AdminKubeClient().CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{})
if err != nil {
return fmt.Errorf("Error listing RCs: %v", err)
}
rcCount := len(rcList.Items)
if rcCount > 0 {
e2e.Logf("Waiting for %d RCs in namespace %s", rcCount, ns)
for _, rc := range rcList.Items {
e2e.Logf("Waiting for RC: %s", rc.Name)
err := waitForRCToStabilize(oc.AdminKubeClient(), ns, rc.Name, checkPodRunningTimeout)
if err != nil {
return fmt.Errorf("Error in waiting for RC to stabilize: %v", err)
}
err = WaitForRCReady(oc, ns, rc.Name, checkPodRunningTimeout)
if err != nil {
return fmt.Errorf("Error in waiting for RC to become ready: %v", err)
}
}
}
podLabels := exutil.ParseLabelsOrDie(mapToString(podLabelMap))
podList, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: podLabels.String()})
if err != nil {
return fmt.Errorf("Error in listing pods: %v", err)
}
podCount := len(podList.Items)
if podCount > 0 {
e2e.Logf("Waiting for %d pods in namespace %s", podCount, ns)
c := oc.AdminKubeClient()
pods, err := exutil.WaitForPods(c.CoreV1().Pods(ns), podLabels, exutil.CheckPodIsRunning, podCount, checkPodRunningTimeout)
if err != nil {
return fmt.Errorf("Error in pod wait: %v", err)
} else if len(pods) < podCount {
return fmt.Errorf("Only got %v out of %v pods in %s (timeout)", len(pods), podCount, checkPodRunningTimeout)
}
e2e.Logf("All pods in namespace %s running", ns)
}
buildList, err := oc.AsAdmin().BuildClient().BuildV1().Builds(ns).List(metav1.ListOptions{})
if err != nil {
return fmt.Errorf("Error in listing builds: %v", err)
}
e2e.Logf("Build List: %+v", buildList)
if len(buildList.Items) > 0 {
// Get first build name
buildName := buildList.Items[0].Name
e2e.Logf("Waiting for build: %q", buildName)
err = exutil.WaitForABuild(oc.AsAdmin().BuildClient().BuildV1().Builds(ns), buildName, nil, nil, nil)
if err != nil {
exutil.DumpBuildLogs(buildName, oc)
return fmt.Errorf("Error in waiting for build: %v", err)
}
e2e.Logf("Build %q completed", buildName)
}
dcList, err := oc.AsAdmin().AppsClient().AppsV1().DeploymentConfigs(ns).List(metav1.ListOptions{})
if err != nil {
return fmt.Errorf("Error listing DeploymentConfigs: %v", err)
}
if len(dcList.Items) > 0 {
// Get first deployment config name
deploymentName := dcList.Items[0].Name
e2e.Logf("Waiting for deployment: %q", deploymentName)
err = exutil.WaitForDeploymentConfig(oc.AdminKubeClient(), oc.AsAdmin().AppsClient().AppsV1(), ns, deploymentName, 1, true, oc)
if err != nil {
return fmt.Errorf("Error in waiting for DeploymentConfigs: %v", err)
}
e2e.Logf("Deployment %q completed", deploymentName)
}
}
return nil
}
// mkPath returns the fully qualified path to filename: absolute paths are returned as-is; otherwise the working directory and the config file's directory are searched
func mkPath(filename, config string) (string, error) {
// Use absolute path if provided in config
if filepath.IsAbs(filename) {
return filename, nil
}
// Handle an empty filename.
if filename == "" {
return "", fmt.Errorf("no template file defined!")
}
var searchPaths []string
workingDir, err := os.Getwd()
if err != nil {
return "", err
}
configDir := filepath.Dir(config)
searchPaths = append(searchPaths, filepath.Join(workingDir, filename))
searchPaths = append(searchPaths, filepath.Join(configDir, filename))
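// Search order: the file relative to the current working directory first,
// then relative to the directory containing the Cluster Loader config.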
for _, v := range searchPaths {
if _, err := os.Stat(v); err == nil {
return v, nil
}
}
return "", fmt.Errorf("unable to find pod/template file %s\n", filename)
}
// waitForRCToStabilize waits until the RC has a matching generation and replica count between spec and status.
func waitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error {
options := metav1.ListOptions{FieldSelector: fields.Set{
"metadata.name": name,
"metadata.namespace": ns,
}.AsSelector().String()}
w, err := c.CoreV1().ReplicationControllers(ns).Watch(options)
if err != nil {
return err
}
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
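// ContextWithOptionalTimeout behaves like context.WithTimeout but treats a
// zero timeout as "no timeout".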
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "")
}
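// The RC is considered stable once the controller has observed the latest
// spec (Generation <= Status.ObservedGeneration) and the reported replica
// count matches spec.replicas.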
switch rc := event.Object.(type) {
case *v1.ReplicationController:
if rc.Name == name && rc.Namespace == ns &&
rc.Generation <= rc.Status.ObservedGeneration &&
*(rc.Spec.Replicas) == rc.Status.Replicas {
return true, nil
}
e2e.Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
name, rc.Generation, rc.Status.ObservedGeneration, *(rc.Spec.Replicas), rc.Status.Replicas)
}
return false, nil
})
return err
}