forked from onosproject/helmit
/
benchmark.go
215 lines (187 loc) · 7.67 KB
/
benchmark.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
// Copyright 2020-present Open Networking Foundation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cli
import (
"errors"
"os"
"path/filepath"
"time"
"github.com/wangxn2015/helmit/pkg/job"
"github.com/spf13/cobra"
"github.com/wangxn2015/helmit/pkg/benchmark"
"github.com/wangxn2015/helmit/pkg/util/random"
corev1 "k8s.io/api/core/v1"
)
const benchExamples = `
# Run benchmarks packaged in a Docker image.
helmit bench --image atomix/kubernetes-benchmarks:latest --duration 1m
# Run benchmarks by referencing a command package and providing a context.
# The specified context will be loaded into the benchmark pods as the current working directory.
helmit bench ./cmd/benchmarks --context ./charts --iterations 1000
# Run benchmarks in a specific namespace.
helmit bench ./cmd/benchmarks -n bench --suite atomix --duration 5m
# Run a benchmark suite by name.
helmit bench ./cmd/benchmarks -c ./charts --suite atomix --duration 5m
# Run a single benchmark function by name.
helmit bench ./cmd/benchmarks -c ./charts --suite atomix --benchmark BenchmarkGet --duration 5m
# Parallelize benchmark clients across goroutines.
helmit bench ./cmd/benchmarks -c ./charts --suite atomix --parallel 10 --duration 1m
# Parallelize benchmark clients across worker pods.
helmit bench ./cmd/benchmarks -c ./charts --suite atomix --workers 4 --duration 1m
# Override Helm chart values with flags.
# Value overrids must be namespaced with the name of the release to which to apply the value.
helmit bench ./cmd/benchmarks -c ./charts --set atomix-controller.image=atomix/atomix-controller:latest --set atomix-raft.replicas=3 --suite atomix --iterations 1000
# Override Helm chart values with values files.
# Values files must be key/value pairs where the key is the Helm release name and the value the path to the file.
helmit bench ./cmd/benchmarks -c ./charts -f atomix-controller=./atomix-controller.yaml --suite atomix --duration 1m
`
// getBenchCommand builds the cobra command that runs benchmarks on
// Kubernetes. The command accepts at most one positional argument — the
// benchmark command package to compile and run — and is otherwise driven
// entirely by flags; the actual work happens in runBenchCommand.
func getBenchCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:     "benchmark",
		Aliases: []string{"benchmarks", "bench"},
		Short:   "Run benchmarks on Kubernetes",
		Example: benchExamples,
		Args:    cobra.MaximumNArgs(1),
		RunE:    runBenchCommand,
	}
	cmd.Flags().StringP("namespace", "n", "default", "the namespace in which to run the benchmarks")
	cmd.Flags().String("service-account", "", "the name of the service account to use to run worker pods")
	// These pods are benchmark pods, not test pods — the previous help text
	// was copied from the test command.
	cmd.Flags().StringToString("labels", map[string]string{}, "a mapping of labels to add to the benchmark pod")
	cmd.Flags().StringToString("annotations", map[string]string{}, "a mapping of annotations to add to the benchmark pod")
	cmd.Flags().StringP("context", "c", "", "the benchmark context")
	cmd.Flags().StringP("image", "i", "", "the benchmark image to run")
	cmd.Flags().String("image-pull-policy", string(corev1.PullIfNotPresent), "the Docker image pull policy")
	cmd.Flags().StringArrayP("values", "f", []string{}, "release values paths")
	cmd.Flags().StringArray("set", []string{}, "cluster argument overrides")
	cmd.Flags().StringP("suite", "s", "", "the benchmark suite to run")
	cmd.Flags().StringP("benchmark", "b", "", "the name of the benchmark to run")
	cmd.Flags().IntP("workers", "w", 1, "the number of workers to run")
	cmd.Flags().Int("parallel", 1, "the number of concurrent goroutines per client")
	// No shorthand for --iterations: use Int rather than IntP with an empty
	// shorthand letter.
	cmd.Flags().Int("iterations", 0, "the number of iterations to run")
	cmd.Flags().DurationP("max-latency", "m", 0, "maximum latency allowed")
	cmd.Flags().DurationP("duration", "d", 0, "the duration for which to run the benchmark")
	cmd.Flags().StringToStringP("args", "a", map[string]string{}, "a mapping of named benchmark arguments")
	cmd.Flags().Duration("timeout", 10*time.Minute, "benchmark timeout")
	cmd.Flags().Bool("no-teardown", false, "do not tear down clusters following benchmarks")
	cmd.Flags().StringSlice("secret", []string{}, "secrets to pass to the kubernetes pod")
	return cmd
}
// runBenchCommand is the RunE handler for the benchmark command. It reads
// the parsed flags and optional package argument, assembles a
// benchmark.Config, and hands it to the benchmark runner.
func runBenchCommand(cmd *cobra.Command, args []string) error {
	setupCommand(cmd)

	// The single optional positional argument names a command package to build.
	var pkgPath string
	if len(args) > 0 {
		pkgPath = args[0]
	}

	flags := cmd.Flags()
	namespace, _ := flags.GetString("namespace")
	serviceAccount, _ := flags.GetString("service-account")
	labels, _ := flags.GetStringToString("labels")
	annotations, _ := flags.GetStringToString("annotations")
	context, _ := flags.GetString("context")
	image, _ := flags.GetString("image")
	suite, _ := flags.GetString("suite")
	benchmarkName, _ := flags.GetString("benchmark")
	workers, _ := flags.GetInt("workers")
	parallelism, _ := flags.GetInt("parallel")
	iterations, _ := flags.GetInt("iterations")
	duration, _ := flags.GetDuration("duration")
	files, _ := flags.GetStringArray("values")
	sets, _ := flags.GetStringArray("set")
	benchArgs, _ := flags.GetStringToString("args")
	timeout, _ := flags.GetDuration("timeout")
	imagePullPolicy, _ := flags.GetString("image-pull-policy")
	noTeardown, _ := flags.GetBool("no-teardown")
	secretsArray, _ := flags.GetStringSlice("secret")

	pullPolicy := corev1.PullPolicy(imagePullPolicy)

	// Either --iterations or --duration must be specified
	if iterations == 0 && duration == 0 {
		return errors.New("either --iterations or --duration must be specified")
	}

	// Either a command package or image must be specified
	if pkgPath == "" && image == "" {
		return errors.New("must specify either a benchmark package or --image to run")
	}

	// Generate a unique benchmark ID
	benchID := random.NewPetName(2)

	// When a package path is given, compile it to a temp-dir binary and fall
	// back to the default runner image if none was provided.
	var executable string
	if pkgPath != "" {
		executable = filepath.Join(os.TempDir(), "helmit", benchID)
		if err := buildBinary(pkgPath, executable); err != nil {
			cmd.SilenceUsage = true
			cmd.SilenceErrors = true
			return err
		}
		if image == "" {
			image = "onosproject/helmit-runner:latest"
		}
	}

	// Normalize the context directory to an absolute path.
	if context != "" {
		absContext, err := filepath.Abs(context)
		if err != nil {
			return err
		}
		context = absContext
	}

	// --max-latency is optional; only propagate it when explicitly set.
	var maxLatency *time.Duration
	if flags.Changed("max-latency") {
		latency, _ := flags.GetDuration("max-latency")
		maxLatency = &latency
	}

	valueFiles, err := parseFiles(files)
	if err != nil {
		return err
	}
	values, err := parseOverrides(sets)
	if err != nil {
		return err
	}
	secrets, err := parseSecrets(secretsArray)
	if err != nil {
		return err
	}

	// A zero duration means "not bounded by time"; pass nil in that case.
	var runDuration *time.Duration
	if duration != 0 {
		runDuration = &duration
	}

	config := &benchmark.Config{
		Config: &job.Config{
			ID:              benchID,
			Namespace:       namespace,
			ServiceAccount:  serviceAccount,
			Labels:          labels,
			Annotations:     annotations,
			Executable:      executable,
			Image:           image,
			ImagePullPolicy: pullPolicy,
			Context:         context,
			ValueFiles:      valueFiles,
			Values:          values,
			Timeout:         timeout,
			NoTeardown:      noTeardown,
			Secrets:         secrets,
		},
		Suite:       suite,
		Benchmark:   benchmarkName,
		Workers:     workers,
		Parallelism: parallelism,
		Iterations:  iterations,
		Duration:    runDuration,
		Args:        benchArgs,
		MaxLatency:  maxLatency,
		NoTeardown:  noTeardown,
	}
	return benchmark.Run(config)
}