example_k8petstore.go
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall"
"time"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
k8bpsContainerVersion = "r.2.8.19" // Container version; see the examples/k8petstore Dockerfiles for details.
k8bpsThroughputDummy = "0" // Polling time = 0, since we poll with framework.Poll in Ginkgo rather than using the shell script tests.
k8bpsRedisSlaves = "1" // Number of redis slaves.
k8bpsDontRunTest = "0" // Don't run the script's embedded test.
k8bpsStartupTimeout = 30 * time.Second // Maximum time to wait for the first petstore transactions to be stored.
k8bpsMinTransactionsOnStartup = 3 // Number of transactions we expect to see before we consider the data generator started.
// Constants for the first test. We can make this a map once we add scale tests.
k8bpsSmokeTestFinalTransactions = 50
k8bpsSmokeTestTimeout = 60 * time.Second
)
// readTransactions reads the current number of transactions from the k8petstore web server endpoint.
// For more details, see the source of the k8petstore web server.
func readTransactions(c clientset.Interface, ns string) (error, int) {
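// Query the "frontend" service through the API server's service proxy; its "llen" endpoint
// returns the number of stored transactions (the Redis list length) as a plain integer.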
proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get())
if errProxy != nil {
return errProxy, -1
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
body, err := proxyRequest.Namespace(ns).
Context(ctx).
Name("frontend").
Suffix("llen").
DoRaw()
if err != nil {
if ctx.Err() != nil {
framework.Failf("Failed to read petstore transactions: %v", err)
}
return err, -1
}
totalTrans, err := strconv.Atoi(string(body))
return err, totalTrans
}
// runK8petstore runs the k8petstore application, bound to an external nodeport, and
// polls until finalTransactionsExpected transactions are acquired, within at most maxTime.
func runK8petstore(restServers int, loadGenerators int, c clientset.Interface, ns string, finalTransactionsExpected int, maxTime time.Duration) {
var err error
k8bpsScriptLocation := filepath.Join(framework.TestContext.RepoRoot, "examples/k8petstore/k8petstore-nodeport.sh")
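// Invoke the nodeport setup script with its expected positional arguments: kubectl path,
// container version, polling time, # of REST servers, # of load generators, # of redis slaves,
// whether to run the embedded test, and the target namespace.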
cmd := exec.Command(
k8bpsScriptLocation,
framework.TestContext.KubectlPath,
k8bpsContainerVersion,
k8bpsThroughputDummy,
strconv.Itoa(restServers),
strconv.Itoa(loadGenerators),
k8bpsRedisSlaves,
k8bpsDontRunTest, // Don't run the script's embedded test.
ns,
)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
framework.Logf("Starting k8petstore application....")
// Run the k8petstore app, and log / fail if it returns any errors.
// This should return quickly, assuming containers are downloaded.
if err = cmd.Start(); err != nil {
framework.Failf("%v", err)
}
// Make sure there are no command errors.
if err = cmd.Wait(); err != nil {
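// If the script exited non-zero, log its exit status to make the failure easier to diagnose.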
if exiterr, ok := err.(*exec.ExitError); ok {
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
framework.Logf("Exit Status: %d", status.ExitStatus())
}
}
}
Expect(err).NotTo(HaveOccurred())
framework.Logf("... Done starting k8petstore ")
totalTransactions := 0
framework.Logf("Start polling, timeout is %v seconds", maxTime)
// How long until the FIRST transactions are created.
startupTimeout := time.After(k8bpsStartupTimeout)
// Maximum time to wait until we reach the nth transaction.
transactionsCompleteTimeout := time.After(maxTime)
tick := time.Tick(2 * time.Second)
var ready = false
framework.Logf("Now waiting %v seconds to see progress (transactions > 3)", k8bpsStartupTimeout)
T:
for {
select {
case <-transactionsCompleteTimeout:
framework.Logf("Completion timeout %v reached, %v transactions not complete. Breaking!", time.Duration(maxTime), finalTransactionsExpected)
break T
case <-tick:
// Don't fail if there's an error. We expect a few failures might happen in the cloud.
err, totalTransactions = readTransactions(c, ns)
if err == nil {
framework.Logf("PetStore : Time: %v, %v = total petstore transactions stored into redis.", time.Now(), totalTransactions)
if totalTransactions >= k8bpsMinTransactionsOnStartup {
ready = true
}
if totalTransactions >= finalTransactionsExpected {
break T
}
} else {
if ready {
framework.Logf("Blip: during polling: %v", err)
} else {
framework.Logf("Not ready yet: %v", err)
}
}
case <-startupTimeout:
if !ready {
framework.Logf("Startup Timeout %v reached: Its been too long and we still haven't started accumulating %v transactions!", startupTimeout, k8bpsMinTransactionsOnStartup)
break T
}
}
}
// We should have exceeded the finalTransactionsExpected number of transactions.
// If this fails but transactions are still being created, finalTransactionsExpected may need
// recalibrating; otherwise your cluster is broken or slow!
Expect(totalTransactions).To(BeNumerically(">", finalTransactionsExpected))
}
var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {
BeforeEach(func() {
// The shell scripts in k8petstore break on Jenkins; a pure Go rewrite is in progress.
framework.SkipUnlessProviderIs("local")
})
// The number of nodes dictates the total number of load generators and REST servers.
var nodeCount int
f := framework.NewDefaultFramework("petstore")
It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
loadGenerators := nodeCount
restServers := nodeCount
fmt.Printf("load generators / rest servers [ %v / %v ] ", loadGenerators, restServers)
runK8petstore(restServers, loadGenerators, f.ClientSet, f.Namespace.Name, k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout)
})
})