/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"testing"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/gomega"
	"k8s.io/klog"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/component-base/logs"
	"k8s.io/kubernetes/pkg/version"
	commontest "k8s.io/kubernetes/test/e2e/common"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
	"k8s.io/kubernetes/test/e2e/framework/metrics"
	"k8s.io/kubernetes/test/e2e/manifest"
	testutils "k8s.io/kubernetes/test/utils"

	// ensure auth plugins are loaded
	_ "k8s.io/client-go/plugin/pkg/client/auth"

	// ensure that cloud providers are loaded
	_ "k8s.io/kubernetes/test/e2e/framework/providers/aws"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/openstack"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere"
)

var (
	cloudConfig      = &framework.TestContext.CloudConfig
	nodeKillerStopCh = make(chan struct{})
)

// There are certain operations we only want to run once per overall test invocation
// (such as deleting old namespaces, or verifying that all system pods are running).
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
// to ensure that these operations only run on the first parallel Ginkgo node.
//
// This function takes two parameters: one function which runs on only the first Ginkgo node,
// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes,
// accepting the byte array.
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
	// Run only on Ginkgo node 1
	switch framework.TestContext.Provider {
	case "gce", "gke":
		framework.LogClusterImageSources()
	}

	c, err := framework.LoadClientset()
	if err != nil {
		klog.Fatal("Error loading client: ", err)
	}

	// Delete any namespaces except those created by the system. This ensures no
	// lingering resources are left over from a previous test run.
	if framework.TestContext.CleanStart {
		deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */
			[]string{
				metav1.NamespaceSystem,
				metav1.NamespaceDefault,
				metav1.NamespacePublic,
			})
		if err != nil {
			framework.Failf("Error deleting orphaned namespaces: %v", err)
		}
		klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
		if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
			framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
		}
	}

	// In large clusters we may get to this point but still have a bunch
	// of nodes without Routes created. Since this would make a node
	// unschedulable, we need to wait until all of them are schedulable.
	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))

	// If NumNodes is not specified then auto-detect how many are schedulable and not tainted
	if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes {
		framework.TestContext.CloudConfig.NumNodes = len(framework.GetReadySchedulableNodesOrDie(c).Items)
	}

	// Ensure all pods are running and ready before starting tests (otherwise,
	// cluster infrastructure pods that are being pulled or started can block
	// test pods from running, and tests that ensure all pods are running and
	// ready will fail).
	podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
	// TODO: In large clusters, we often observe non-starting pods due to
	// #41007. To avoid those pods blocking the whole test run (and just
	// wasting the run), we allow for some not-ready pods (with the
	// number equal to the number of allowed not-ready nodes).
	if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
		framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
		framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
		runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
		framework.Failf("Error waiting for all pods to be running and ready: %v", err)
	}

	if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
		framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
	}

	// Log the version of the server and this client.
	framework.Logf("e2e test version: %s", version.Get().GitVersion)

	dc := c.DiscoveryClient
	serverVersion, serverErr := dc.ServerVersion()
	if serverErr != nil {
		framework.Logf("Unexpected server error retrieving version: %v", serverErr)
	}
	if serverVersion != nil {
		framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
	}

	// Reference common test to make the import valid.
	commontest.CurrentSuite = commontest.E2E

	if framework.TestContext.NodeKiller.Enabled {
		nodeKiller := framework.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider)
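		// Re-create the stop channel here on node 1; SynchronizedAfterSuite closes it to stop the NodeKiller.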
		nodeKillerStopCh = make(chan struct{})
		go nodeKiller.Run(nodeKillerStopCh)
	}
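	// No state needs to be passed to the other Ginkgo nodes, so return nil.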
	return nil
}, func(data []byte) {
	// Run on all Ginkgo nodes
})

// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
// Here, the order of functions is reversed; first, the function which runs everywhere,
// and then the function that only runs on the first Ginkgo node.
var _ = ginkgo.SynchronizedAfterSuite(func() {
	// Run on all Ginkgo nodes
	framework.Logf("Running AfterSuite actions on all nodes")
	framework.RunCleanupActions()
}, func() {
	// Run only on Ginkgo node 1
	framework.Logf("Running AfterSuite actions on node 1")
	if framework.TestContext.ReportDir != "" {
		framework.CoreDump(framework.TestContext.ReportDir)
	}
	if framework.TestContext.GatherSuiteMetricsAfterTest {
		if err := gatherTestSuiteMetrics(); err != nil {
			framework.Logf("Error gathering metrics: %v", err)
		}
	}
	if framework.TestContext.NodeKiller.Enabled {
		close(nodeKillerStopCh)
	}
})

func gatherTestSuiteMetrics() error {
	framework.Logf("Gathering metrics")
	c, err := framework.LoadClientset()
	if err != nil {
		return fmt.Errorf("error loading client: %v", err)
	}

	// Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally).
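	// The positional booleans select which components to scrape: kubelets (skipped on kubemark),
	// scheduler, controller-manager and apiserver, in that order per the framework's NewMetricsGrabber signature.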
	grabber, err := metrics.NewMetricsGrabber(c, nil, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics)
	if err != nil {
		return fmt.Errorf("failed to create MetricsGrabber: %v", err)
	}

	received, err := grabber.Grab()
	if err != nil {
		return fmt.Errorf("failed to grab metrics: %v", err)
	}

	metricsForE2E := (*framework.MetricsForE2E)(&received)
	metricsJSON := metricsForE2E.PrintJSON()
	if framework.TestContext.ReportDir != "" {
		filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json")
		if err := ioutil.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil {
			return fmt.Errorf("error writing to %q: %v", filePath, err)
		}
	} else {
		framework.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
	}

	return nil
}

// RunE2ETests checks configuration parameters (specified through flags) and then runs
// E2E tests using the Ginkgo runner.
// If a "report directory" is specified, one or more JUnit test reports will be
// generated in this directory, and cluster logs will also be saved.
// This function is called on each Ginkgo node in parallel mode.
func RunE2ETests(t *testing.T) {
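	// Make panics caught by the apimachinery runtime utilities crash the process instead of only being logged.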
	runtimeutils.ReallyCrash = true
	logs.InitLogs()
	defer logs.FlushLogs()

	gomega.RegisterFailHandler(ginkgowrapper.Fail)

	// Disable skipped tests unless they are explicitly requested.
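	// By default, tests tagged [Flaky] or gated behind a [Feature:...] label are skipped.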
	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
		config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
	}

	// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
	var r []ginkgo.Reporter
	if framework.TestContext.ReportDir != "" {
		// TODO: we should probably only be trying to create this directory once
		// rather than once-per-Ginkgo-node.
		if err := os.MkdirAll(framework.TestContext.ReportDir, 0755); err != nil {
			klog.Errorf("Failed creating report directory: %v", err)
		} else {
			r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode))))
		}
	}
	klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode)

	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}

// Run a test container to try to contact the Kubernetes api-server from a pod, wait for it
// to flip to Ready, log its output and delete it.
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
	path := "test/images/clusterapi-tester/pod.yaml"
	framework.Logf("Parsing pod from %v", path)
	p, err := manifest.PodFromManifest(path)
	if err != nil {
		framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
		return
	}
	p.Namespace = ns
	if _, err := c.CoreV1().Pods(ns).Create(p); err != nil {
		framework.Logf("Failed to create %v: %v", p.Name, err)
		return
	}
	defer func() {
		if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil {
			framework.Logf("Failed to delete pod %v: %v", p.Name, err)
		}
	}()

	timeout := 5 * time.Minute
	if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
		framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
		return
	}
	logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
	if err != nil {
		framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
	} else {
		framework.Logf("Output of clusterapi-tester:\n%v", logs)
	}
}