/
run.sh
executable file
·186 lines (181 loc) · 6.82 KB
/
run.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
#!/usr/bin/bash
. common.sh
. build_helper.sh
. ../../utils/compare.sh
# Map ${WORKLOAD} to a workload template, metrics profile and sizing knobs.
# Helpers (label_node_with_label, find_running_pods_num, install_svt_repo,
# prep_networkpolicy_workload, log) come from the sourced scripts above.
label=""

# Count schedulable worker nodes matching ${WORKER_NODE_LABEL}, excluding
# infra and workload nodes. Selector is quoted to survive spaces in the label.
worker_node_count() {
  kubectl get node -l "${WORKER_NODE_LABEL},node-role.kubernetes.io/infra!=,node-role.kubernetes.io/workload!=" -o name | wc -l
}

case ${WORKLOAD} in
  cluster-density)
    WORKLOAD_TEMPLATE=workloads/cluster-density/cluster-density.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics-aggregated.yaml}
    export TEST_JOB_ITERATIONS=${JOB_ITERATIONS:-1000}
    ;;
  node-density)
    WORKLOAD_TEMPLATE=workloads/node-pod-density/node-pod-density.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics.yaml}
    NODE_COUNT=${NODE_COUNT:-$(worker_node_count)}
    PODS_PER_NODE=${PODS_PER_NODE:-245}
    label="node-density=enabled"
    label_node_with_label "$label"
    find_running_pods_num regular
    ;;
  node-density-heavy)
    WORKLOAD_TEMPLATE=workloads/node-density-heavy/node-density-heavy.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics.yaml}
    NODE_COUNT=${NODE_COUNT:-$(worker_node_count)}
    PODS_PER_NODE=${PODS_PER_NODE:-245}
    export NAMESPACED_ITERATIONS=${NAMESPACED_ITERATIONS:-false}
    label="node-density=enabled"
    label_node_with_label "$label"
    find_running_pods_num heavy
    ;;
  node-density-cni)
    WORKLOAD_TEMPLATE=workloads/node-density-cni/node-density-cni.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics.yaml}
    NODE_COUNT=${NODE_COUNT:-$(worker_node_count)}
    PODS_PER_NODE=${PODS_PER_NODE:-245}
    export NAMESPACED_ITERATIONS=${NAMESPACED_ITERATIONS:-false}
    label="node-density=enabled"
    label_node_with_label "$label"
    find_running_pods_num cni
    ;;
  node-density-cni-networkpolicy)
    WORKLOAD_TEMPLATE=workloads/node-density-cni-networkpolicy/node-density-cni-networkpolicy.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics.yaml}
    NODE_COUNT=${NODE_COUNT:-$(worker_node_count)}
    PODS_PER_NODE=${PODS_PER_NODE:-245}
    label="node-density=enabled"
    label_node_with_label "$label"
    find_running_pods_num cni
    ;;
  pod-density)
    WORKLOAD_TEMPLATE=workloads/node-pod-density/node-pod-density.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics.yaml}
    export TEST_JOB_ITERATIONS=${PODS:-1000}
    ;;
  pod-density-heavy)
    WORKLOAD_TEMPLATE=workloads/pod-density-heavy/pod-density-heavy.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics.yaml}
    NODE_COUNT=${NODE_COUNT:-$(worker_node_count)}
    PODS_PER_NODE=${PODS_PER_NODE:-245}
    label="pod-density-heavy=enabled"
    label_node_with_label "$label"
    find_running_pods_num regular
    ;;
  pods-service-route)
    WORKLOAD_TEMPLATE=workloads/pods-service-route/pods-service-route.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics.yaml}
    export TEST_JOB_ITERATIONS=${NAMESPACE_COUNT:-1000}
    ;;
  max-namespaces)
    WORKLOAD_TEMPLATE=workloads/max-namespaces/max-namespaces.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics-aggregated.yaml}
    export TEST_JOB_ITERATIONS=${NAMESPACE_COUNT:-1000}
    ;;
  max-services)
    WORKLOAD_TEMPLATE=workloads/max-services/max-services.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics-aggregated.yaml}
    export TEST_JOB_ITERATIONS=${SERVICE_COUNT:-1000}
    ;;
  concurrent-builds)
    rm -rf conc_builds_results.out
    WORKLOAD_TEMPLATE=workloads/concurrent-builds/concurrent-builds.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics-aggregated.yaml}
    # ':=' (not ':-') also assigns BUILD_TEST_REPO/BUILD_TEST_BRANCH when
    # unset, which install_svt_repo may rely on — keep the assignment form.
    export build_test_repo=${BUILD_TEST_REPO:=https://github.com/openshift/svt.git}
    export build_test_branch=${BUILD_TEST_BRANCH:=master}
    install_svt_repo
    # Intentional word-splitting: BUILD_LIST is a space-separated list of
    # concurrency levels. NOTE: bash cannot export arrays, so the original
    # 'export' here was a no-op; the array is still visible to shell
    # functions in this process.
    build_array=(${BUILD_LIST})
    label="concurrent-builds=enabled"
    label_node_with_label "$label"
    # The highest requested concurrency level drives build sizing.
    max=1
    for v in "${build_array[@]}"; do
      if (( v > max )); then
        max=$v
      fi
    done
    export MAX_CONC_BUILDS=$((max + 1))
    # MAX_CONC_BUILDS was assigned on the previous line, so the original
    # ':-$max' fallback was unreachable — use the value directly.
    export TEST_JOB_ITERATIONS=${MAX_CONC_BUILDS}
    ;;
  cluster-density-ms)
    WORKLOAD_TEMPLATE=workloads/managed-services/cluster-density.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/hypershift-metrics.yaml}
    export TEST_JOB_ITERATIONS=${JOB_ITERATIONS:-75}
    ;;
  networkpolicy-case1)
    WORKLOAD_TEMPLATE=workloads/networkpolicy/case1.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics-ovn.yaml}
    export TEST_JOB_ITERATIONS=${JOB_ITERATIONS:-500}
    prep_networkpolicy_workload
    ;;
  networkpolicy-case2)
    WORKLOAD_TEMPLATE=workloads/networkpolicy/case2.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics-ovn.yaml}
    export TEST_JOB_ITERATIONS=${JOB_ITERATIONS:-5}
    prep_networkpolicy_workload
    ;;
  networkpolicy-case3)
    WORKLOAD_TEMPLATE=workloads/networkpolicy/case3.yml
    METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics-ovn.yaml}
    export TEST_JOB_ITERATIONS=${JOB_ITERATIONS:-5}
    prep_networkpolicy_workload
    ;;
  custom)
    # Caller supplies WORKLOAD_TEMPLATE/METRICS_PROFILE via environment.
    ;;
  *)
    log "Unknown workload ${WORKLOAD}, exiting"
    exit 1
    ;;
esac
# Print a run banner describing the chosen workload and its parameters.
cat <<EOF
###############################################
Workload: ${WORKLOAD}
Workload template: ${WORKLOAD_TEMPLATE}
Metrics profile: ${METRICS_PROFILE}
Alerts profile: ${ALERTS_PROFILE}
QPS: ${QPS}
Burst: ${BURST}
UUID: ${UUID}
EOF
# Record the job start timestamp (UTC, RFC3339-style).
JOB_START=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
# Node-scaled workloads report node/pod sizing; everything else reports
# the job iteration count.
case ${WORKLOAD} in
  node-density*|pod-density-heavy)
    printf 'Node count: %s\n' "${NODE_COUNT}"
    printf 'Pods per node: %s\n' "${PODS_PER_NODE}"
    ;;
  *)
    printf 'Job iterations: %s\n' "${TEST_JOB_ITERATIONS}"
    ;;
esac
echo "###############################################"
# Optionally refresh pprof collection state before the run.
if [[ ${PPROF_COLLECTION} == "true" ]]; then
  delete_pprof_secrets
  delete_oldpprof_folder
  get_pprof_secrets
fi
# Execute the selected workload. concurrent-builds fans out one
# run_build_workload invocation per application in APP_LIST and then prints
# the aggregated results file; every other workload uses the generic
# run_workload helper from common.sh.
if [[ ${WORKLOAD} == "concurrent-builds" ]]; then
  # Intentional word-splitting: APP_LIST is a space-separated list of apps.
  app_array=(${APP_LIST})
  for app in "${app_array[@]}"
  do
    run_build_workload "$app"
  done
  unlabel_nodes_with_label "$label"
  cat conc_builds_results.out
else
  run_workload
fi
# Record the job end timestamp (UTC, RFC3339-style).
JOB_END=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
if [[ ${CLEANUP_WHEN_FINISH} == "true" ]]; then
  cleanup
  # Workloads that labelled nodes must also remove that label.
  if [[ ${WORKLOAD} == node-density* || ${WORKLOAD} == pod-density-heavy ]]; then
    unlabel_nodes_with_label "$label"
  fi
fi
delete_pprof_secrets
if [[ ${ENABLE_SNAPPY_BACKUP} == "true" ]] ; then
  # Archive any collected pprof data and push it via snappy_backup.
  tar czf pprof.tar.gz ./pprof-data
  snappy_backup "" "pprof.tar.gz" "${WORKLOAD}"
fi
run_benchmark_comparison
# NOTE(review): rc is never assigned in this file — presumably it is set by
# run_workload/run_benchmark_comparison in the sourced helpers; confirm.
# If rc is empty, the numeric test errors and we fall through to "failure".
if [ "${rc}" -eq 0 ]; then
  JOB_STATUS="success"
else
  JOB_STATUS="failure"
fi
# Index job metadata into Elasticsearch via the shared indexing helper.
env JOB_START="$JOB_START" JOB_END="$JOB_END" JOB_STATUS="$JOB_STATUS" UUID="$UUID" WORKLOAD="$WORKLOAD" ES_SERVER="$ES_SERVER" ../../utils/index.sh
exit ${rc}