diff --git a/.gitignore b/.gitignore
index 7b2c2ac5..0644444e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,4 +5,5 @@ acmeair/acmeair/.metadata
 acmeair/acmeair/.classpath
 acmeair/acmeair/.project
 acmeair/acmeair/.gradle
+.idea/
 acmeair/jmeter-driver/acmeair-jmeter/build
diff --git a/renaissance/README.md b/renaissance/README.md
new file mode 100644
index 00000000..59c23504
--- /dev/null
+++ b/renaissance/README.md
@@ -0,0 +1,58 @@
+
+# About The Benchmark
+The Renaissance Benchmark Suite aggregates common modern JVM workloads, including, but not limited to, Big Data, machine-learning, and functional programming. The suite is intended to be used to optimize just-in-time compilers, interpreters, GCs, and for tools such as profilers, debuggers, or static analyzers, and even different hardware. It is intended to be an open-source, collaborative project, in which the community can propose and improve benchmark workloads.
+
+More information about this benchmark can be found on [Renaissance](https://github.com/renaissance-benchmarks/renaissance)
+# Prerequisites
+To generate the results from the Renaissance Benchmark, we need to:
+
+ - Install minikube (kubernetes cluster), which can be done from [Minikube](https://minikube.sigs.k8s.io/docs/start/) and then install prometheus on the minikube cluster. This can be done by following the steps in the [Autotune Installation](https://github.com/kruize/autotune/blob/master/docs/autotune_install.md).
+ + You,also need to install a driver of your choice for running renaissance onto your local system + + Download a driver (docker or podman) + + [Docker](https://docs.docker.com/engine/install/) + + [Podman](https://podman.io/getting-started/installation) + + # How To Run This Benchmark + + To run the benchmark on kubernetes cluster to collect performance metrics + + ./scripts/perf/renaissance-run.sh --clustertype=CLUSTER_TYPE -s BENCHMARK_SERVER -e RESULTS_DIR_PATH [-w WARMUPS] [-m MEASURES] [-i TOTAL_INST] [--iter=TOTAL_ITR] [-r= set redeploy to true] [-n NAMESPACE] [-g RENAISSANCE_IMAGE] [--cpureq=CPU_REQ] [--memreq=MEM_REQ] [--cpulim=CPU_LIM] [--memlim=MEM_LIM] [-b BENCHMARKS] [-R REPETITIONS] [-d DURATION] " + + - **CLUSTER_TYPE**: Type of cluster. Supports openshift , minikube. +- **BENCHMARK_SERVER**: Name of the cluster you are using +- **RESULTS_DIR_PATH**: Directory to store results +- **DURATION**: Duration of each warmup and measurement run. +- **WARMUPS**: No.of warmup runs. +- **MEASURES**: No.of measurement runs. +- **ITERATIONS**: No.of iterations. +- **CPU_REQ**: CPU request +- **MEM_REQ**: Memory request +- **CPU_LIM**: CPU limit +- **MEM_LIM**: Memory limit +- **RENAISSANCE_IMAGE**:prakalp23/renaissance1041:latest +- **BENCHMARKS**:Choice of a microbenchmark from Renaissance [Microbenchmarks](https://github.com/renaissance-benchmarks/renaissance) + +Example:./renaissance-run.sh --clustertype=minikube -s localhost -e ./results -w 1 -m 1 -i 1 --iter=1 -r -n default --cpureq=1.5 --memreq=3152M --cpulim=1.5 --memlim=3152M -b "page-rank" -g prakalp23/renaissance1041:latest -d 60 + + # The Experiment Results + + The experiment results using the above scripts generates a csv file which contains the resource usage information for the Renaissance Benchmark can be found here + + [Renaissance Results](https://github.com/Prakalp23/autotune-results/tree/renaissance/Renaissance) + + +## Scripts Details + +| Script Name | What it does? 
| +|-------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| perf/getmetrics-promql.sh | Has prometheus queries that are required calculate the metrics required for objective function and the benchmark. | +| perf/parsemetrics-promql.sh | Parse the prometheus metrics data to calculate the average , max and min values as per the requirement of the benchmark. | +| perf/ci.php | Use to measure confidence interval of data. | +| perf/parsemetrics-wrk.sh | Parse the metrics data from hyperfoil/wrk load simulator. | + + + diff --git a/renaissance/docker/Dockerfile b/renaissance/docker/Dockerfile new file mode 100644 index 00000000..8893b2c1 --- /dev/null +++ b/renaissance/docker/Dockerfile @@ -0,0 +1,11 @@ +FROM eclipse-temurin:17.0.3_7-jre +WORKDIR /target +RUN mkdir -p /output +ADD https://github.com/renaissance-benchmarks/renaissance/releases/download/v0.14.1/renaissance-gpl-0.14.1.jar /target +COPY renaissance-gpl-0.14.1.jar /target/renaissance-gpl-0.14.1.jar +ADD https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.0/jmx_prometheus_javaagent-0.17.0.jar /target +COPY config.yaml config.yaml +ENV JDK_JAVA_OPTIONS= +ENV BENCHMARK=all +ENV TIME_LIMIT=5 +ENTRYPOINT java -javaagent:./jmx_prometheus_javaagent-0.17.0.jar=8080:config.yaml ${JDK_JAVA_OPTIONS} -jar /target/renaissance-gpl-0.14.1.jar -t ${TIME_LIMIT} --csv /output/renaissance-output.csv ${BENCHMARK} diff --git a/renaissance/docker/config.yaml b/renaissance/docker/config.yaml new file mode 100644 index 00000000..772c2a40 --- /dev/null +++ b/renaissance/docker/config.yaml @@ -0,0 +1,2 @@ +rules: +- pattern: ".*" diff --git a/renaissance/manifests/renaissance.yaml b/renaissance/manifests/renaissance.yaml new file mode 100644 index 00000000..32d374f7 --- /dev/null +++ b/renaissance/manifests/renaissance.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 
+kind: Deployment +metadata: + name: renaissance-sample + labels: + app: renaissance-app +spec: + replicas: 1 + selector: + matchLabels: + app: renaissance-deployment + template: + metadata: + labels: + name: renaissance-deployment + app: renaissance-deployment + # Add label to the application which is used by kruize/autotune to monitor it + app.kubernetes.io/name: "renaissance-deployment" + app.kubernetes.io/layer: "hotspot" + version: v1 + spec: + volumes: + - name: test-volume + containers: + - name: renaissance-server + image: prakalp23/renaissance1041:latest + imagePullPolicy: IfNotPresent + env: + ports: + - containerPort: 8080 + resources: + requests: + limits: + volumeMounts: + - name: "test-volume" + mountPath: "/opt/jLogs" +--- +apiVersion: v1 +kind: Service +metadata: + name: renaissance-service + annotations: + prometheus.io/scrape: 'true' + prometheus.io/path: '/metrics' + labels: + app: renaissance-app +spec: + type: NodePort + ports: + - port: 8080 + targetPort: 8080 + name: renaissance-port + selector: + name: renaissance-deployment diff --git a/renaissance/manifests/service-monitor.yaml b/renaissance/manifests/service-monitor.yaml new file mode 100644 index 00000000..24bcfc3c --- /dev/null +++ b/renaissance/manifests/service-monitor.yaml @@ -0,0 +1,13 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: renaissance + labels: + team: renaissance-frontend +spec: + selector: + matchLabels: + app: renaissance-app + endpoints: + - port: renaissance-port + path: '/metrics' diff --git a/renaissance/scripts/perf/getmetrics-promql.sh b/renaissance/scripts/perf/getmetrics-promql.sh new file mode 100755 index 00000000..1d767fc8 --- /dev/null +++ b/renaissance/scripts/perf/getmetrics-promql.sh @@ -0,0 +1,271 @@ +#!/bin/bash +# +# Copyright (c) 2022,2022 IBM Corporation, RedHat and others. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+### Script to get pod and cluster information through prometheus queries###
+#
+# checks if the previous command is executed successfully
+# input:Return value of previous command
+# output:Prompts the error message if the return value is not zero
+function err_exit()
+{
+	if [ $? != 0 ]; then
+		printf "$*"
+		echo
+		exit 1
+	fi
+}
+
+## Collect CPU usage per pod; appends "timestamp;value" samples to cpu-<ITER>.json every 15s
+function get_cpu()
+{
+	URL=$1
+	TOKEN=$2
+	RESULTS_DIR=$3
+	ITER=$4
+	APP_NAME=$5
+	# Delete the old json file if any
+	rm -rf ${RESULTS_DIR}/cpu-${ITER}.json
+	while true
+	do
+		# Processing curl output "timestamp value" using jq tool.
+#		echo "curl --silent -G -kH Authorization: Bearer ${TOKEN} --data-urlencode 'query=sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate) by (pod)' ${URL} "
+		curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(rate(container_cpu_usage_seconds_total[5m])) by (pod,namespace)' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [.metric.pod, .value[0], .value[1]|tostring]| join(";") ]' | grep "${APP_NAME}"| cut -d ";" -f2,3 >> ${RESULTS_DIR}/cpu-${ITER}.json
+		sleep 15
+	done
+}
+
+## Collect MEM_RSS
+function get_mem_rss()
+{
+	URL=$1
+	TOKEN=$2
+	RESULTS_DIR=$3
+	ITER=$4
+	APP_NAME=$5
+	# Delete the old json file if any
+	rm -rf ${RESULTS_DIR}/mem-${ITER}.json
+	while true
+	do
+		# Processing curl output "timestamp value" using jq tool.
+ curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(container_memory_rss) by (pod)' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [ .metric.pod,.value[0], .value[1]|tostring]| join(";") ]' | grep "${APP_NAME}"| cut -d ";" -f2,3 >> ${RESULTS_DIR}/mem-${ITER}.json + + sleep 15 + done +} + +function get_mem_usage() +{ + URL=$1 + TOKEN=$2 + RESULTS_DIR=$3 + ITER=$4 + APP_NAME=$5 + # Delete the old json file if any + rm -rf ${RESULTS_DIR}/memusage-${ITER}.json + while true + do + # Processing curl output "timestamp value" using jq tool. + curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(container_memory_working_set_bytes) by (pod) ' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [ .metric.pod,.value[0], .value[1]|tostring] | join(";") ]' | grep "${APP_NAME}"|cut -d ";" -f2,3 >> ${RESULTS_DIR}/memusage-${ITER}.json + + sleep 15 + done +} + +## Collect network bytes received +function get_container_network_receive_bytes_total() +{ + URL=$1 + TOKEN=$2 + RESULTS_DIR=$3 + ITER=$4 + APP_NAME=$5 + while true + do + # Processing curl output "timestamp value" using jq tool. + curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(rate(container_network_receive_bytes_total[60s]))' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [ .value[0], .value[1]|tostring] | join(";") ]' | grep -E "[0-9]" >> ${RESULTS_DIR}/netreceivebytes-${ITER}.json + sleep 15 + done +} + +function get_container_network_transmit_bytes_total() +{ + URL=$1 + TOKEN=$2 + RESULTS_DIR=$3 + ITER=$4 + APP_NAME=$5 + while true + do + # Processing curl output "timestamp value" using jq tool. 
+ curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(rate(container_network_transmit_bytes_total[60s]))' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [ .value[0], .value[1]|tostring] | join(";") ]' | grep -E "[0-9]" >> ${RESULTS_DIR}/nettransmitbytes-${ITER}.json + sleep 15 + done +} + +function get_container_network_receive_packets_total() +{ + URL=$1 + TOKEN=$2 + RESULTS_DIR=$3 + ITER=$4 + APP_NAME=$5 + while true + do + # Processing curl output "timestamp value" using jq tool. + curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(rate(container_network_receive_packets_total[60s]))' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [ .value[0], .value[1]|tostring] | join(";") ]' | grep -E "[0-9]" >> ${RESULTS_DIR}/cnetreceivebytes-${ITER}.json + sleep 15 + done +} + +function get_container_network_transmit_packets_total() +{ + URL=$1 + TOKEN=$2 + RESULTS_DIR=$3 + ITER=$4 + APP_NAME=$5 + while true + do + # Processing curl output "timestamp value" using jq tool. + curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(rate(container_network_transmit_packets_total[60s]))' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [ .value[0], .value[1]|tostring] | join(";") ]' | grep -E "[0-9]" >> ${RESULTS_DIR}/cnettransmitbytes-${ITER}.json + sleep 15 + done +} + +function get_disk_details_total() +{ + URL=$1 + TOKEN=$2 + RESULTS_DIR=$3 + ITER=$4 + APP_NAME=$5 + while true + do + # Processing curl output "timestamp value" using jq tool. 
+ curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(rate(container_fs_usage_bytes[60s]))' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [ .value[0], .value[1]|tostring] | join(";") ]' | grep -E "[0-9]" >> ${RESULTS_DIR}/diskdetails-${ITER}.json + sleep 15 + done +} + +function get_container_fs_io_time_seconds_total() +{ + URL=$1 + TOKEN=$2 + RESULTS_DIR=$3 + ITER=$4 + APP_NAME=$5 + while true + do + # Processing curl output "timestamp value" using jq tool. + curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(rate(container_fs_io_time_seconds_total[60s]))' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [ .value[0], .value[1]|tostring] | join(";") ]' | grep -E "[0-9]" >> ${RESULTS_DIR}/fsiototal-${ITER}.json + sleep 15 + done +} + +function get_container_fs_read_seconds_total() +{ + URL=$1 + TOKEN=$2 + RESULTS_DIR=$3 + ITER=$4 + APP_NAME=$5 + while true + do + # Processing curl output "timestamp value" using jq tool. + curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(rate(container_fs_read_seconds_total[60s]))' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [ .value[0], .value[1]|tostring] | join(";") ]' | grep -E "[0-9]" >> ${RESULTS_DIR}/fsreadtotal-${ITER}.json + sleep 15 + done +} + +function get_container_fs_write_seconds_total() +{ + URL=$1 + TOKEN=$2 + RESULTS_DIR=$3 + ITER=$4 + APP_NAME=$5 + while true + do + # Processing curl output "timestamp value" using jq tool. 
+	curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(rate(container_fs_write_seconds_total[60s]))' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [ .value[0], .value[1]|tostring] | join(";") ]'| grep -E "[0-9]" >> ${RESULTS_DIR}/fswritetotal-${ITER}.json
+	sleep 15
+	done
+}
+
+#this is not required for renaissance as of now, will be updated later
+function get_request_duration_seconds_sum_total()
+{
+	URL=$1
+	TOKEN=$2
+	RESULTS_DIR=$3
+	ITER=$4
+	APP_NAME=$5
+	while true
+	do
+		# Processing curl output "timestamp value" using jq tool. The query endpoint is an argument to curl (not grep, which takes only the app-name pattern).
+		curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(rate(http_request_duration_seconds_sum[60s])) by (pod)' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [ .value[0], .value[1]|tostring] | join(";") ]' | grep "${APP_NAME}" >> ${RESULTS_DIR}/get_request_duration_seconds_sum_total-${ITER}.json
+		#err_exit "Error: could not get request sum duration of seconds details of the pod" >>setup.log
+		sleep 15
+	done
+}
+function get_request_duration_seconds_count_total()
+{
+	URL=$1
+	TOKEN=$2
+	RESULTS_DIR=$3
+	ITER=$4
+	APP_NAME=$5
+	while true
+	do
+		# Processing curl output "timestamp value" using jq tool.
+		curl --silent -G -kH "Authorization: Bearer ${TOKEN}" --data-urlencode 'query=sum(rate(http_request_duration_seconds_count[60s])) by (pod)' http://localhost:9090/api/v1/query | jq '[ .data.result[] | [ .value[0], .value[1]|tostring]| join(";") ]' | grep "${APP_NAME}" >> ${RESULTS_DIR}/get_request_duration_seconds_count_total-${ITER}.json
+		#err_exit "Error: could not get request count details of the pod" >>setup.log
+		sleep 15
+	done
+}
+ITER=$1
+TIMEOUT=$2
+RESULTS_DIR=$3
+BENCHMARK_SERVER=$4
+APP_NAME=$5
+CLUSTER_TYPE=$6
+
+mkdir -p ${RESULTS_DIR}
+if [[ ${CLUSTER_TYPE} == "openshift" ]]; then
+	QUERY_APP=thanos-querier-openshift-monitoring.apps
+	URL=https://${QUERY_APP}.${BENCHMARK_SERVER}/api/v1/query
+	TOKEN=`oc whoami --show-token`
+elif [[ ${CLUSTER_TYPE} == "minikube" ]]; then
+	#QUERY_IP=`minikube ip`
+	QUERY_APP="${BENCHMARK_SERVER}:9090"
+	URL=http://${QUERY_APP}/api/v1/query
+	TOKEN=TOKEN
+fi
+export -f err_exit get_cpu get_mem_rss get_mem_usage get_container_network_receive_bytes_total
+export -f get_container_network_transmit_bytes_total get_container_network_receive_packets_total get_container_network_transmit_packets_total get_disk_details_total
+export -f get_container_fs_io_time_seconds_total get_container_fs_read_seconds_total get_container_fs_write_seconds_total get_request_duration_seconds_sum_total get_request_duration_seconds_count_total
+echo "Collecting metric data" >> setup.log
+timeout ${TIMEOUT} bash -c "get_cpu ${URL} ${TOKEN} ${RESULTS_DIR} ${ITER} ${APP_NAME}" &
+timeout ${TIMEOUT} bash -c "get_mem_rss ${URL} ${TOKEN} ${RESULTS_DIR} ${ITER} ${APP_NAME}" &
+timeout ${TIMEOUT} bash -c "get_mem_usage ${URL} ${TOKEN} ${RESULTS_DIR} ${ITER} ${APP_NAME}" &
+timeout ${TIMEOUT} bash -c "get_container_network_receive_bytes_total ${URL} ${TOKEN} ${RESULTS_DIR} ${ITER} ${APP_NAME}" &
+timeout ${TIMEOUT} bash -c "get_container_network_transmit_bytes_total ${URL} ${TOKEN} ${RESULTS_DIR} ${ITER} ${APP_NAME}" &
+timeout ${TIMEOUT} bash -c "get_container_network_receive_packets_total ${URL} ${TOKEN} ${RESULTS_DIR} ${ITER} ${APP_NAME}" &
+timeout ${TIMEOUT} bash -c "get_container_network_transmit_packets_total ${URL} ${TOKEN} ${RESULTS_DIR} ${ITER} ${APP_NAME}" &
+timeout ${TIMEOUT} bash -c "get_disk_details_total ${URL} ${TOKEN} ${RESULTS_DIR} ${ITER} ${APP_NAME}" &
+timeout ${TIMEOUT} bash -c "get_container_fs_io_time_seconds_total ${URL} ${TOKEN} ${RESULTS_DIR} ${ITER} ${APP_NAME}" &
+timeout ${TIMEOUT} bash -c "get_container_fs_read_seconds_total ${URL} ${TOKEN} ${RESULTS_DIR} ${ITER} ${APP_NAME}" &
+timeout ${TIMEOUT} bash -c "get_container_fs_write_seconds_total ${URL} ${TOKEN} ${RESULTS_DIR} ${ITER} ${APP_NAME}" &
+sleep ${TIMEOUT}
diff --git a/renaissance/scripts/perf/parsemetrics-promql.sh b/renaissance/scripts/perf/parsemetrics-promql.sh
new file mode 100755
index 00000000..83b41694
--- /dev/null
+++ b/renaissance/scripts/perf/parsemetrics-promql.sh
@@ -0,0 +1,230 @@
+#!/bin/bash
+#
+# Copyright (c) 2022,2022 IBM Corporation, RedHat and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +### Script to parse prometheus query data### + + +CURRENT_DIR="$(dirname "$(realpath "$0")")" +source ${CURRENT_DIR}/../utils/common.sh + +# Parse CPU, memory and cluster information +# input:type of run(warmup|measure), total number of runs, iteration number +# output:Creates cpu, memory and cluster information in the form of log files for each run +function parsePromMetrics() { + TYPE=$1 + TOTAL_RUNS=$2 + ITR=$3 + + for (( run=0 ; run<"${TOTAL_RUNS}" ;run++)) + do + for poddatalog in "${POD_CPU_LOGS[@]}" + do + # Parsing CPU, app metric logs for pod + parseDataLog ${poddatalog} ${TYPE} ${run} ${ITR} + done + for podmemlog in "${POD_MEM_LOGS[@]}" + do + # Parsing Mem logs for pod + parsePodMemLog ${podmemlog} ${TYPE} ${run} ${ITR} + done + for poddiskdetailslog in "${POD_DISK_DETAILS_LOGS[@]}" + do + # Parsing Mem logs for pod + parseDataLog ${poddiskdetailslog} ${TYPE} ${run} ${ITR} + done + for podnetworklog in "${POD_NW_LOGS[@]}" + do + # Parsing Network receive logs for pod + parseDataLog ${podnetworklog} ${TYPE} ${run} ${ITR} + done + + for podiolog in "${POD_IO_LOGS[@]}" + do + # Parsing Network transmit logs for pod + parseDataLog ${podiolog} ${TYPE} ${run} ${ITR} + done + done +} + +# Parsing memory logs for pod +# input: podmemlogs array element, type of run(warmup|measure), run(warmup|measure) number, iteration number +# output:creates memory log for pod +function parsePodMemLog() +{ + MODE=$1 + TYPE=$2 + RUN=$3 + ITR=$4 + RESULTS_LOG=${MODE}-${TYPE}-${ITR}.log + MEM_LOG=${RESULTS_DIR_P}/${MODE}-${TYPE}-${RUN}.json + TEMP_LOG=${RESULTS_DIR_P}/temp-mem-${MODE}.log + if [ -s "${MEM_LOG}" ]; then + cat ${MEM_LOG} | cut -d ";" -f2 | cut -d '"' -f1 > ${RESULTS_DIR_P}/temp-mem.log + mem_avg=$( echo `calcAvg_inMB ${RESULTS_DIR_P}/temp-mem.log | cut -d "=" -f2` ) + mem_min=$( echo `calcMin ${RESULTS_DIR_P}/temp-mem.log` ) + mem_min_inMB=$(echo ${mem_min}/1024/1024 | bc) + mem_max=$( echo `calcMax ${RESULTS_DIR_P}/temp-mem.log` ) + mem_max_inMB=$(echo 
${mem_max}/1024/1024 | bc) + fi + echo "${run} , ${mem_avg}, ${mem_min_inMB} , ${mem_max_inMB} " >> ${RESULTS_DIR_J}/${RESULTS_LOG} + echo ", ${mem_avg} , ${mem_min_inMB} , ${mem_max_inMB} " >> ${RESULTS_DIR_J}/${MODE}-${TYPE}-raw.log +} + +# Parsing CPU,Network,I/O,Disk logs for pod +# input: podcpulogs array element, type of run(warmup|measure), run(warmup|measure) number, iteration number +# output:creates metric specific log for pod +function parseDataLog() +{ + MODE=$1 + TYPE=$2 + RUN=$3 + ITR=$4 + RESULTS_LOG=${MODE}-${TYPE}-${ITR}.log + DATA_LOG=${RESULTS_DIR_P}/${MODE}-${TYPE}-${RUN}.json + if [ -s "${DATA_LOG}" ]; then + cat ${DATA_LOG} | cut -d ";" -f2 | cut -d '"' -f1 > ${RESULTS_DIR_P}/temp-data.log + data_avg=$( echo `calcAvg ${RESULTS_DIR_P}/temp-data.log | cut -d "=" -f2` ) + data_min=$( echo `calcMin ${RESULTS_DIR_P}/temp-data.log` ) + data_max=$( echo `calcMax ${RESULTS_DIR_P}/temp-data.log` ) + fi + echo "${run} , ${data_avg}, ${data_min} , ${data_max}" >> ${RESULTS_DIR_J}/${RESULTS_LOG} + echo ",${data_avg} , ${data_min} , ${data_max}" >> ${RESULTS_DIR_J}/${MODE}-${TYPE}-raw.log +} + +# Parse the results of jmeter load for each instance of application +# input: total number of iterations, result directory, Total number of instances +# output: Parse the results and generate the Metrics log files +function parseResults() { + TOTAL_ITR=$1 + RESULTS_DIR_J=$2 + SCALE=$3 + WARMUPS=$4 + MEASURES=$5 + + for (( itr=0 ; itr<${TOTAL_ITR} ;itr++)) + do + RESULTS_DIR_P=${RESULTS_DIR_J}/ITR-${itr} + parsePromMetrics warmup ${WARMUPS} ${itr} + parsePromMetrics measure ${MEASURES} ${itr} + + for poddatalog in "${POD_CPU_LOGS[@]}" + do + if [ -s "${RESULTS_DIR_J}/${poddatalog}-measure-${itr}.log" ]; then + cat ${RESULTS_DIR_J}/${poddatalog}-measure-${itr}.log | cut -d "," -f2 >> ${RESULTS_DIR_J}/${poddatalog}-measure-temp.log + cat ${RESULTS_DIR_J}/${poddatalog}-measure-${itr}.log | cut -d "," -f3 >> ${RESULTS_DIR_J}/${poddatalog}_min-measure-temp.log + cat 
${RESULTS_DIR_J}/${poddatalog}-measure-${itr}.log | cut -d "," -f4 >> ${RESULTS_DIR_J}/${poddatalog}_max-measure-temp.log + fi + done + for podmemlog in "${POD_MEM_LOGS[@]}" + do + if [ -s "${RESULTS_DIR_J}/${podmemlog}-measure-${itr}.log" ]; then + cat ${RESULTS_DIR_J}/${podmemlog}-measure-${itr}.log | cut -d "," -f2 >> ${RESULTS_DIR_J}/${podmemlog}-measure-temp.log + cat ${RESULTS_DIR_J}/${podmemlog}-measure-${itr}.log | cut -d "," -f3 >> ${RESULTS_DIR_J}/${podmemlog}_min-measure-temp.log + cat ${RESULTS_DIR_J}/${podmemlog}-measure-${itr}.log | cut -d "," -f4 >> ${RESULTS_DIR_J}/${podmemlog}_max-measure-temp.log + fi + done + for poddiskdetailslog in "${POD_DISK_DETAILS_LOGS[@]}" + do + if [ -s "${RESULTS_DIR_J}/${poddiskdetailslog}-measure-${itr}.log" ]; then + cat ${RESULTS_DIR_J}/${poddiskdetailslog}-measure-${itr}.log | cut -d "," -f2 >> ${RESULTS_DIR_J}/${poddiskdetailslog}-measure-temp.log + cat ${RESULTS_DIR_J}/${poddiskdetailslog}-measure-${itr}.log | cut -d "," -f3 >> ${RESULTS_DIR_J}/${poddiskdetailslog}_min-measure-temp.log + cat ${RESULTS_DIR_J}/${poddiskdetailslog}-measure-${itr}.log | cut -d "," -f4 >> ${RESULTS_DIR_J}/${poddiskdetailslog}_max-measure-temp.log + fi + done + for podnetworklog in "${POD_NW_LOGS[@]}" + do + if [ -s "${RESULTS_DIR_J}/${podnetworklog}-measure-${itr}.log" ]; then + cat ${RESULTS_DIR_J}/${podnetworklog}-measure-${itr}.log | cut -d "," -f2 >> ${RESULTS_DIR_J}/${podnetworklog}-measure-temp.log + cat ${RESULTS_DIR_J}/${podnetworklog}-measure-${itr}.log | cut -d "," -f3 >> ${RESULTS_DIR_J}/${podnetworklog}_min-measure-temp.log + cat ${RESULTS_DIR_J}/${podnetworklog}-measure-${itr}.log | cut -d "," -f4 >> ${RESULTS_DIR_J}/${podnetworklog}_max-measure-temp.log + fi + done + + + + for podiolog in "${POD_IO_LOGS[@]}" + do + if [ -s "${RESULTS_DIR_J}/${podiolog}-measure-${itr}.log" ]; then + cat ${RESULTS_DIR_J}/${podiolog}-measure-${itr}.log | cut -d "," -f2 >> ${RESULTS_DIR_J}/${podiolog}-measure-temp.log + cat 
${RESULTS_DIR_J}/${podiolog}-measure-${itr}.log | cut -d "," -f3 >> ${RESULTS_DIR_J}/${podiolog}_min-measure-temp.log + cat ${RESULTS_DIR_J}/${podiolog}-measure-${itr}.log | cut -d "," -f4 >> ${RESULTS_DIR_J}/${podiolog}_max-measure-temp.log + fi + done + + done + + ###### Add different raw logs we want to merge + #Cumulative raw data + paste ${RESULTS_DIR_J}/cpu-measure-raw.log ${RESULTS_DIR_J}/mem-measure-raw.log >> ${RESULTS_DIR_J}/../Metrics-cpumem-raw.log + + for metric in "${TOTAL_LOGS[@]}" + do + if [ -s ${RESULTS_DIR_J}/${metric}-measure-temp.log ]; then + if [ ${metric} == "cpu_min" ] || [ ${metric} == "mem_min" ] || [ ${metric} == "memusage_min" ] || [ ${metric} == "diskdetails_min" ] || [ ${metric} == "netreceivebytes_min" ] || [ ${metric} == "nettransmitbytes_min" ] || [ ${metric} == "cnetreceivebytes_min" ] || [ ${metric} == "cnettransmitbytes_min" ] || [ ${metric} == "fsiototal_min" ] || [ ${metric} == "fsreadtotal_min" ] || [ ${metric} == "fswritetotal_min" ] || [ ${metric} == "request_count_min" ] || [ ${metric} == "request_sum_min" ]; then + minval=$(echo `calcMin ${RESULTS_DIR_J}/${metric}-measure-temp.log`) + if [ ! -z ${minval} ]; then + eval total_${metric}=${minval} + else + eval total_${metric}=0 + fi + elif [ ${metric} == "cpu_max" ] || [ ${metric} == "mem_max" ] || [ ${metric} == "memusage_max" ] || [ ${metric} == "diskdetails_max" ] || [ ${metric} == "netreceivebytes_max" ] || [ ${metric} == "nettransmitbytes_max" ] || [ ${metric} == "cnetreceivebytes_max" ] || [ ${metric} == "cnettransmitbytes_max" ] || [ ${metric} == "fsiototal_max" ] || [ ${metric} == "fsreadtotal_max" ] || [ ${metric} == "fswritetotal_max" ] || [ ${metric} == "request_count_max" ] || [ ${metric} == "request_sum_max" ]; then + maxval=$(echo `calcMax ${RESULTS_DIR_J}/${metric}-measure-temp.log`) + if [ ! 
-z ${maxval} ]; then + eval total_${metric}=${maxval} + else + eval total_${metric}=0 + fi + else + val=$(echo `calcAvg ${RESULTS_DIR_J}/${metric}-measure-temp.log | cut -d "=" -f2`) + if [ ! -z ${val} ]; then + eval total_${metric}_avg=${val} + else + eval total_${metric}_avg=0 + fi + fi + + # Calculate confidence interval + metric_ci=`php ${SCRIPT_REPO}/utils/ci.php ${RESULTS_DIR_J}/${metric}-measure-temp.log` + if [ ! -z ${metric_ci} ]; then + eval ci_${metric}=${metric_ci} + else + eval ci_${metric}=0 + fi + fi + done + + echo "INSTANCES , CPU_USAGE , MEM_RSS_USAGE , MEM_USAGE , DISKDETAILS_USAGE , NETTRANSMITBYTES_USAGE , NETRECEIVEBYTES_USAGE , CNETTRANSMITBYTES_USAGE , CNETRECEIVEBYTES_USAGE , FSIOTOTAL_USAGE , FSREADTOTAL_USAGE , FSWRITETOTAL_USAGE , CPU_MIN , CPU_MAX , MEM_RSS_MIN , MEM_RSS_MAX , MEM_MIN , MEM_MAX , DISKDETAILS_MIN , DISKDETAILS_MAX , NETTRANSMITBYTES_MIN , NETTRANSMITBYTES_MAX , NETRECEIVEBYTES_MIN , NETRECEIVEBYTES_MAX , CNETTRANSMITBYTES_MIN , CNETTRANSMITBYTES_MAX , CNETRECEIVEBYTES_MIN , CNETRECEIVEBYTES_MAX , FSIOTOTAL_MIN , FSIOTOTAL_MAX , FSREADTOTAL_MIN , FSREADTOTAL_MAX , FSWRITETOTAL_MIN , FSWRITETOTAL_MAX" > ${RESULTS_DIR_J}/../Metrics-prom.log + echo "${SCALE} , ${total_cpu_avg} , ${total_mem_avg} , ${total_memusage_avg} , ${total_diskdetails_avg} , ${total_nettransmitbytes_avg} , ${total_netreceivebytes_avg} , ${total_cnettransmitbytes_avg} , ${total_cnetreceivebytes_avg} , ${total_fsiototal_avg} , ${total_fsreadtotal_avg} , ${total_fswritetotal_avg} , ${total_cpu_min} , ${total_cpu_max} , ${total_mem_min} , ${total_mem_max} , ${total_memusage_min} , ${total_memusage_max} , ${total_diskdetails_min} , ${total_diskdetails_max} , ${total_nettransmitbytes_min} , ${total_nettransmitbytes_max} , ${total_netreceivebytes_min} , ${total_netreceivebytes_max} , ${total_cnettransmitbytes_min} , ${total_cnettransmitbytes_max} , ${total_cnetreceivebytes_min} , ${total_cnetreceivebytes_max} , ${total_fsiototal_min} , ${total_fsiototal_max} 
, ${total_fsreadtotal_min} , ${total_fsreadtotal_max} , ${total_fswritetotal_min} , ${total_fswritetotal_max}" >> ${RESULTS_DIR_J}/../Metrics-prom.log + echo "${SCALE} , ${total_mem_avg} , ${total_memusage_avg} " >> ${RESULTS_DIR_J}/../Metrics-mem-prom.log + echo "${SCALE} , ${total_cpu_avg} " >> ${RESULTS_DIR_J}/../Metrics-cpu-prom.log + echo "${SCALE} , ${total_maxspike_cpu_max} , ${total_maxspike_mem_max} " >> ${RESULTS_DIR_J}/../Metrics-spikes-prom.log +} + +POD_CPU_LOGS=(cpu) +POD_MEM_LOGS=(mem memusage) +POD_DISK_DETAILS_LOGS=(diskdetails) +POD_NW_LOGS=(netreceivebytes nettransmitbytes cnetreceivebytes cnettransmitbytes) +POD_IO_LOGS=(fsiototal fsreadtotal fswritetotal) +TOTAL_LOGS=(${POD_CPU_LOGS[@]} ${POD_MEM_LOGS[@]} ${POD_DISK_DETAILS_LOGS[@]} ${POD_NW_LOGS[@]} ${POD_IO_LOGS[@]} cpu_min cpu_max mem_min mem_max memusage_min memusage_max diskdetails_min diskdetails_max netreceivebytes_min netreceivebytes_max nettransmitbytes_min nettransmitbytes_max cnettransmitbytes_min cnettransmitbytes_max cnetreceivebytes_min cnetreceivebytes_max fsiototal_min , fsiototal_max , fsreadtotal_min , fsreadtotal_max , fswritetotal_min , fswritetotal_max) + +TOTAL_ITR=$1 +RESULTS_DIR_J=$2 +SCALE=$3 +WARMUPS=$4 +MEASURES=$5 +SCRIPT_REPO=$6 + +parseResults ${TOTAL_ITR} ${RESULTS_DIR_J} ${SCALE} ${WARMUPS} ${MEASURES} ${SCRIPT_REPO} diff --git a/renaissance/scripts/perf/renaissance-run.sh b/renaissance/scripts/perf/renaissance-run.sh new file mode 100755 index 00000000..905d66b3 --- /dev/null +++ b/renaissance/scripts/perf/renaissance-run.sh @@ -0,0 +1,304 @@ +#!/bin/bash +# +# Copyright (c) 2022, 2022 Red Hat, IBM Corporation and others. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +### Script to perform load test on multiple instances of renaissance benchmark on openshift/minikube### +# +CURRENT_DIR="$(dirname "$(realpath "$0")")" +source ${CURRENT_DIR}/../renaissance-common.sh +pushd "${CURRENT_DIR}" > /dev/null +pushd ".." > /dev/null +SCRIPT_REPO=${PWD} +pushd ".." > /dev/null +LOGFILE="${PWD}/setup.log" +K_CPU=2 +K_MEM=6144 +# checks if the previous command is executed successfully +# input:Return value of previous command +# output:Prompts the error message if the return value is not zero +function err_exit() { + if [ $? != 0 ]; then + printf "$*" + echo + echo "The run failed. 
See setup.log for more details" + ${K_EXEC} get pods -n ${NAMESPACE} >> ${LOGFILE} + ${K_EXEC} get events -n ${NAMESPACE} >> ${LOGFILE} + ${K_EXEC} logs pod/`${K_EXEC} get pods | grep "${APP_NAME}" | cut -d " " -f1` -n ${NAMESPACE} >> ${LOGFILE} + echo "1 , 99999 " >> ${RESULTS_DIR_ROOT}/Metrics-prom.log + cat ${RESULTS_DIR_ROOT}/Metrics-prom.log + ## Cleanup all the deployments + ${SCRIPT_REPO}/renaissance-cleanup.sh -c ${CLUSTER_TYPE} -n ${NAMESPACE} >> ${LOGFILE} + exit 1 + fi +} +# Run the benchmark as +# SCRIPT BENCHMARK_SERVER_NAME NAMESPACE RESULTS_DIR_PATH WARMUPS MEASURES TOTAL_INST TOTAL_ITR RE_DEPLOY DURATION BENCHMARK +# Ex of ARGS : --clustertype=openshift -s example.in.com -e /tfb/results -w 5 -m 3 -i 1 --iter=1 -r -d 60 -b page-rank +# Describes the usage of the script +function usage() { + echo + echo "Usage: $0 --clustertype=CLUSTER_TYPE -s BENCHMARK_SERVER -e RESULTS_DIR_PATH [-w WARMUPS] [-m MEASURES] [-i TOTAL_INST] [--iter=TOTAL_ITR] [-r= set redeploy to true] [-n NAMESPACE] [-g RENAISSANCE_IMAGE] [--cpureq=CPU_REQ] [--memreq=MEM_REQ] [--cpulim=CPU_LIM] [--memlim=MEM_LIM] [-b BENCHMARKS] [-R REPETITIONS] [-d DURATION] " + exit 1 +} +## Check if prometheus is running for valid benchmark results. +prometheus_pod_running=$(kubectl get pods --all-namespaces | grep "prometheus-k8s-0") +if [ "${prometheus_pod_running}" == "" ]; then + err_exit "Install prometheus for valid results from benchmark." 
+fi +# Iterate through the commandline options +while getopts s:e:w:m:i:rg:n:t:R:d:b:-: gopts +do + case ${gopts} in + -) + case "${OPTARG}" in + clustertype=*) + CLUSTER_TYPE=${OPTARG#*=} + ;; + iter=*) + TOTAL_ITR=${OPTARG#*=} + ;; + cpureq=*) + CPU_REQ=${OPTARG#*=} + ;; + memreq=*) + MEM_REQ=${OPTARG#*=} + ;; + cpulim=*) + CPU_LIM=${OPTARG#*=} + ;; + memlim=*) + MEM_LIM=${OPTARG#*=} + ;; + envoptions=*) + ENV_OPTIONS=${OPTARG#*=} + ;; + usertunables=*) + OPTIONS_VAR=${OPTARG#*=} + ;; + *) + esac + ;; + s) + BENCHMARK_SERVER="${OPTARG}" + ;; + e) + RESULTS_DIR_PATH="${OPTARG}" + ;; + w) + WARMUPS="${OPTARG}" + ;; + m) + MEASURES="${OPTARG}" + ;; + i) + TOTAL_INST="${OPTARG}" + ;; + r) + RE_DEPLOY="true" + ;; + g) + RENAISSANCE_IMAGE="${OPTARG}" + ;; + n) + NAMESPACE="${OPTARG}" + ;; + b) + BENCHMARKS="${OPTARG}" + ;; + R) + REPETITIONS="${OPTARG}" + ;; + d) + DURATION="${OPTARG}" + ;; + esac +done + +if [[ -z "${CLUSTER_TYPE}" || -z "${BENCHMARK_SERVER}" || -z "${RESULTS_DIR_PATH}" ]]; then + echo "Do set the variables - CLUSTER_TYPE, BENCHMARK_SERVER and RESULTS_DIR_PATH " + usage +fi + +if [ -z "${WARMUPS}" ]; then + WARMUPS=5 +fi + +if [ -z "${MEASURES}" ]; then + MEASURES=3 +fi + +if [ -z "${TOTAL_INST}" ]; then + TOTAL_INST=1 +fi + +if [ -z "${TOTAL_ITR}" ]; then + TOTAL_ITR=1 +fi + +if [ -z "${RE_DEPLOY}" ]; then + RE_DEPLOY=false +fi + +if [ -z "${RENAISSANCE_IMAGE}" ]; then + RENAISSANCE_IMAGE="prakalp23/renaissance1041:latest" +fi + +if [ -z "${NAMESPACE}" ]; then + NAMESPACE="default" +fi + +if [ -z "${REPETITIONS}" ]; then + REPETITIONS="20" +fi + +if [ -z "${BENCHMARKS}" ]; then + BENCHMARKS="page-rank" +fi + +if [ -z "${DURATION}" ]; then + DURATION="60" +fi + +if [[ ${CLUSTER_TYPE} == "openshift" ]]; then + K_EXEC="oc" +elif [[ ${CLUSTER_TYPE} == "minikube" ]]; then + K_EXEC="kubectl" +fi + +RESULTS_DIR_ROOT=${RESULTS_DIR_PATH}/renaissance-$(date +%Y%m%d%H%M) +mkdir -p ${RESULTS_DIR_ROOT} + +#Adding 5 secs buffer to retrieve CPU and MEM info 
+CPU_MEM_DURATION=`expr ${DURATION} + 5` +BENCHMARK_DURATION=`expr ${WARMUPS} + ${MEASURES}` +BENCHMARK_DURATION=`expr ${BENCHMARK_DURATION} \* ${DURATION}` +# Check if the application is running +# output: Returns 1 if the application is running else returns 0 +function check_app() { + if [ "${CLUSTER_TYPE}" == "openshift" ]; then + K_EXEC="oc" + elif [ "${CLUSTER_TYPE}" == "minikube" ]; then + K_EXEC="kubectl" + fi + CMD=$(${K_EXEC} get pods --namespace=${NAMESPACE} | grep "${APP_NAME}" | grep "Running" | cut -d " " -f1) + for status in "${CMD[@]}" + do + if [ -z "${status}" ]; then + echo "Application pod did not come up" >> ${LOGFILE} + ${K_EXEC} get pods -n ${NAMESPACE} >> ${LOGFILE} + ${K_EXEC} get events -n ${NAMESPACE} >> ${LOGFILE} + ${K_EXEC} logs pod/`${K_EXEC} get pods | grep "${APP_NAME}" | cut -d " " -f1` -n ${NAMESPACE} >> ${LOGFILE} + echo "The run failed. See setup.log for more details" + exit 1; + fi + done +} + +# Perform warmup and measure runs +# input: number of runs(warmup|measure), result directory +# output: Cpu info, memory info, node info, wrk load for each runs(warmup|measure) in the form of jason files +function runItr() +{ + TYPE=$1 + RUNS=$2 + RESULTS_DIR_L=$3 + for (( run=0; run<"${RUNS}"; run++ )) + do + # Check if the application is running + check_app + # Get CPU and MEM info through prometheus queries + ${SCRIPT_REPO}/perf/getmetrics-promql.sh ${TYPE}-${run} ${CPU_MEM_DURATION} ${RESULTS_DIR_L} ${BENCHMARK_SERVER} ${APP_NAME} ${CLUSTER_TYPE} & + # Sleep till the wrk load completes + sleep ${DURATION} + sleep 1 + done +} + +function runIterations() { + SCALING=$1 + TOTAL_ITR=$2 + WARMUPS=$3 + MEASURES=$4 + RESULTS_DIR_R=$5 + for (( itr=0; itr<"${TOTAL_ITR}"; itr++ )) + do + echo "***************************************" >> ${LOGFILE} + echo "Starting iteration ${itr}" >> ${LOGFILE} + echo "***************************************" >> ${LOGFILE} + if [ ${RE_DEPLOY} == "true" ]; then + echo "Deploying the application..." 
>> ${LOGFILE} + ${SCRIPT_REPO}/renaissance-deploy.sh --clustertype=${CLUSTER_TYPE} -s ${BENCHMARK_SERVER} -n ${NAMESPACE} -i ${SCALING} -g ${RENAISSANCE_IMAGE} --cpureq=${CPU_REQ} --memreq=${MEM_REQ} --cpulim=${CPU_LIM} --memlim=${MEM_LIM} --envoptions="${ENV_OPTIONS}" --usertunables="${OPTIONS_VAR}" -b ${BENCHMARKS} -t ${BENCHMARK_DURATION} >> ${LOGFILE} + fi + # Add extra sleep time for the deployment to complete as few machines takes longer time. + sleep 30 + + # Start the load + RESULTS_DIR_I=${RESULTS_DIR_R}/ITR-${itr} + echo "Running ${WARMUPS} warmups" >> ${LOGFILE} + # Perform warmup runs + runItr warmup ${WARMUPS} ${RESULTS_DIR_I} + echo "Running ${MEASURES} measures" >> ${LOGFILE} + # Perform measure runs + runItr measure ${MEASURES} ${RESULTS_DIR_I} + sleep 15 + echo "***************************************" >> ${LOGFILE} + echo "Completed iteration ${itr}..." >> ${LOGFILE} + echo "***************************************" >> ${LOGFILE} + done +} + +echo "INSTANCES , CPU_USAGE , MEM_USAGE , CPU_MIN , CPU_MAX , MEM_MIN , MEM_MAX " > ${RESULTS_DIR_ROOT}/Metrics-prom.log +echo ", OPERATION_TIME, WEB_ERRORS , OPTIME_CI " > ${RESULTS_DIR_ROOT}/Metrics-renaissance.log +echo ", CPU_REQ , MEM_REQ , CPU_LIM , MEM_LIM " > ${RESULTS_DIR_ROOT}/Metrics-config.log +echo ", DEPLOYMENT_NAME , NAMESPACE , IMAGE_NAME , CONTAINER_NAME" > ${RESULTS_DIR_ROOT}/deploy-config.log + +echo "INSTANCES , CLUSTER_CPU% , C_CPU_REQ% , C_CPU_LIM% , CLUSTER_MEM% , C_MEM_REQ% , C_MEM_LIM% " > ${RESULTS_DIR_ROOT}/Metrics-cluster.log +echo "RUN , CPU_REQ , MEM_REQ , CPU_LIM , MEM_LIM , CPU , CPU_MIN , CPU_MAX , MEM , MEM_MIN , MEM_MAX" > ${RESULTS_DIR_ROOT}/Metrics-raw.log +echo "INSTANCES , MEM_RSS , MEM_USAGE " > ${RESULTS_DIR_ROOT}/Metrics-mem-prom.log +echo "INSTANCES , CPU_USAGE" > ${RESULTS_DIR_ROOT}/Metrics-cpu-prom.log + + +echo "INSTANCES , CPU_MAXSPIKE , MEM_MAXSPIKE " >> ${RESULTS_DIR_ROOT}/Metrics-spikes-prom.log + + +echo ", ${CPU_REQ} , ${MEM_REQ} , ${CPU_LIM} , ${MEM_LIM} " 
>> ${RESULTS_DIR_ROOT}/Metrics-config.log
+echo ", renaissance-sample , ${NAMESPACE} , ${RENAISSANCE_IMAGE} , renaissance" >> ${RESULTS_DIR_ROOT}/deploy-config.log
+if [ ${CLUSTER_TYPE} == "minikube" ]; then
+	fwd_prometheus_port_minikube
+fi
+#TODO Create a function on how many DB inst required for a server. For now,defaulting it to 1
+# Scale the instances and run the iterations
+for (( scale=1; scale<=${TOTAL_INST}; scale++ ))
+do
+	RESULTS_SC=${RESULTS_DIR_ROOT}/scale_${scale}
+	echo "Run in progress..."
+	echo "***************************************" >> ${LOGFILE}
+	echo "Run logs are placed at... " ${RESULTS_DIR_ROOT} >> ${LOGFILE}
+	echo "***************************************" >> ${LOGFILE}
+	echo "Running the benchmark with ${scale} instances with ${TOTAL_ITR} iterations having ${WARMUPS} warmups and ${MEASURES} measurements" >> ${LOGFILE}
+	# Perform warmup and measure runs
+	runIterations ${scale} ${TOTAL_ITR} ${WARMUPS} ${MEASURES} ${RESULTS_SC}
+	echo "Parsing results for ${scale} instances" >> ${LOGFILE}
+	sleep 5
+	${SCRIPT_REPO}/perf/parsemetrics-promql.sh ${TOTAL_ITR} ${RESULTS_SC} ${scale} ${WARMUPS} ${MEASURES} ${SCRIPT_REPO}
+
+done
+
+sleep 10
+echo " "
+# Display the Metrics log file
+paste ${RESULTS_DIR_ROOT}/Metrics-prom.log ${RESULTS_DIR_ROOT}/Metrics-config.log ${RESULTS_DIR_ROOT}/deploy-config.log
+
+paste ${RESULTS_DIR_ROOT}/Metrics-prom.log ${RESULTS_DIR_ROOT}/deploy-config.log > ${RESULTS_DIR_ROOT}/output.csv
diff --git a/renaissance/scripts/renaissance-cleanup.sh b/renaissance/scripts/renaissance-cleanup.sh
new file mode 100644
index 00000000..99e7f2e6
--- /dev/null
+++ b/renaissance/scripts/renaissance-cleanup.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+#
+# Copyright (c) 2022, 2022 Red Hat, IBM Corporation and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +### Script to remove the galaxies setup ### +# +CURRENT_DIR="$(dirname "$(realpath "$0")")" +source ${CURRENT_DIR}/renaissance-common.sh +function usage() { + echo + echo "Usage: -c CLUSTER_TYPE[docker|minikube] [-n NAMESPACE]" + exit 1 +} + +if [ "$#" -lt 1 ]; then + usage +fi + +while getopts c:n:-: gopts +do + case ${gopts} in + c) + CLUSTER_TYPE=${OPTARG} + ;; + n) + NAMESPACE="${OPTARG}" + ;; + esac +done + +if [ -z "${CLUSTER_TYPE}" ]; then + usage +fi + +if [ -z "${NAMESPACE}" ]; then + NAMESPACE="${DEFAULT_NAMESPACE}" +fi +# Removes the renaissance instances +# output: Removes the renaissance and renaissance deployments, services, service monitors and routes +function remove_renaissance() { + renaissance_DEPLOYMENTS=($(${K_EXEC} get deployments --namespace=${NAMESPACE} | grep -e "${APP_NAME}" | cut -d " " -f1)) + + for de in "${renaissance_DEPLOYMENTS[@]}" + do + ${K_EXEC} delete deployment ${de} --namespace=${NAMESPACE} + done + + #Delete the services and routes if any + renaissance_SERVICES=($(${K_EXEC} get svc --namespace=${NAMESPACE} | grep -e "${APP_NAME}" | cut -d " " -f1)) + for se in "${renaissance_SERVICES[@]}" + do + ${K_EXEC} delete svc ${se} --namespace=${NAMESPACE} + done + renaissance_SERVICE_MONITORS=($(${K_EXEC} get servicemonitor --namespace=${NAMESPACE} | grep -e "${APP_NAME}" | cut -d " " -f1)) + for sm in "${renaissance_SERVICE_MONITORS[@]}" + do + ${K_EXEC} delete servicemonitor ${sm} --namespace=${NAMESPACE} + done + + if [[ ${CLUSTER_TYPE} == "openshift" ]]; then + renaissance_ROUTES=($(${K_EXEC} get route 
--namespace=${NAMESPACE} | grep -e "${APP_NAME}" | cut -d " " -f1)) + for ro in "${renaissance_ROUTES[@]}" + do + ${K_EXEC} delete route ${ro} --namespace=${NAMESPACE} + done + fi +} +if [[ ${CLUSTER_TYPE} == "openshift" ]]; then + K_EXEC="oc" +elif [[ ${CLUSTER_TYPE} == "minikube" ]]; then + K_EXEC="kubectl" +fi + +echo -n "Removing the renaissance instances..." +remove_renaissance >> ${LOGFILE} +echo "done" diff --git a/renaissance/scripts/renaissance-common.sh b/renaissance/scripts/renaissance-common.sh new file mode 100644 index 00000000..c22a61b5 --- /dev/null +++ b/renaissance/scripts/renaissance-common.sh @@ -0,0 +1,75 @@ +#!/bin/bash +# +# Copyright (c) 2022,2022 Red Hat, IBM Corporation and others. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +### Script containing common functions ### +CURRENT_DIR="$(dirname "$(realpath "$0")")" +pushd "${CURRENT_DIR}" > /dev/null +pushd ".." > /dev/null + +# Set the defaults for the app +LOGFILE="${PWD}/setup.log" +RENAISSANCE_REPO="${CURRENT_DIR}" +BENCHMARK_IMAGE="prakalp23/renaissance1041:latest" +DEFAULT_NAMESPACE="default" +MANIFESTS_DIR="manifests/" +APP_NAME="renaissance" + +# checks if the previous command is executed successfully +# input:Return value of previous command +# output:Prompts the error message if the return value is not zero +function err_exit() { + if [ $? 
!= 0 ]; then + printf "$*" + echo + echo "See ${LOGFILE} for more details" + exit -1 + fi +} + +# Get the IP addr of the machine / vm that we are running on +function get_ip() { + IP_ADDR=$(ip addr | grep "global" | grep "dynamic" | awk '{ print $2 }' | cut -f 1 -d '/') + if [ -z "${IP_ADDR}" ]; then + IP_ADDR=$(ip addr | grep "global" | head -1 | awk '{ print $2 }' | cut -f 1 -d '/') + fi +} + +# Check if the application is running +# output: Returns 1 if the application is running else returns 0 +function check_app() { + if [ "${CLUSTER_TYPE}" == "openshift" ]; then + K_EXEC="oc" + elif [ "${CLUSTER_TYPE}" == "minikube" ]; then + K_EXEC="kubectl" + fi + CMD=$(${K_EXEC} get pods --namespace=${NAMESPACE} | grep "${APP_NAME}" | grep "Running" | cut -d " " -f1) + for status in "${CMD[@]}" + do + if [ -z "${status}" ]; then + echo "Application pod did not come up" >> ${LOGFILE} + ${K_EXEC} get pods -n ${NAMESPACE} >> ${LOGFILE} + ${K_EXEC} get events -n ${NAMESPACE} >> ${LOGFILE} + ${K_EXEC} logs pod/`${K_EXEC} get pods | grep "${APP_NAME}" | cut -d " " -f1` -n ${NAMESPACE} >> ${LOGFILE} + echo "The run failed. See setup.log for more details" + exit -1; + fi + done +} + +## Forward the prometheus port to collect the metrics +function fwd_prometheus_port_minikube() { + kubectl port-forward pod/prometheus-k8s-0 9090:9090 -n monitoring >> ${LOGFILE} 2>&1 & +} diff --git a/renaissance/scripts/renaissance-deploy.sh b/renaissance/scripts/renaissance-deploy.sh new file mode 100644 index 00000000..1c4c1843 --- /dev/null +++ b/renaissance/scripts/renaissance-deploy.sh @@ -0,0 +1,250 @@ +#!/bin/bash +# +# Copyright (c) 2022,2022 Red Hat, IBM Corporation and others. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +### Script to deploy the one or more instances of tfb application on openshift### +# + +CURRENT_DIR="$(dirname "$(realpath "$0")")" +source ${CURRENT_DIR}/renaissance-common.sh + +SERVER_INSTANCES=1 +BENCHMARK_IMAGE="prakalp23/renaissance1041:latest" +NAMESPACE="${DEFAULT_NAMESPACE}" + +# Run the benchmark as +# SCRIPT BENCHMARK_SERVER +# Ex of ARGS : --clustertype=openshift -s example.in.com -i 2 -g kruize/tfb-qrh:1.13.2.F_mm.v1 + +# Describes the usage of the script +function usage() { + echo + echo "Usage: $0 --clustertype=CLUSTER_TYPE [-s BENCHMARK_SERVER] [-i SERVER_INSTANCES] [-n NAMESPACE] [-g RENAISSANCE_IMAGE] [--cpureq=CPU_REQ] [--memreq=MEM_REQ] [--cpulim=CPU_LIM] [--memlim=MEM_LIM] [-b BENCHMARKS] [-t BENCHMARK_DURATION]" + echo " " + echo "Example: $0 --clustertype=minikube -s example.in.com -i 2 -g prakalp23/renaissance1041:latest --cpulim=4 --cpureq=2 --memlim=1024Mi --memreq=512Mi -b page-rank -t 480" + exit 1 +} + +# Check if the memory request/limit has unit. 
If not ask user to append the unit +# input: Memory request/limit passed by user +# output: Check memory request/limit for unit , if not specified suggest the user to specify the unit +function check_memory_unit() { + MEM=$1 + case "${MEM}" in + [0-9]*M) + ;; + [0-9]*Mi) + ;; + [0-9]*K) + ;; + [0-9]*Ki) + ;; + [0-9]*G) + ;; + [0-9]*Gi) + ;; + *) + echo "Error : Do specify the memory Unit" + echo "Example: ${MEM}K/Ki/M/Mi/G/Gi" + usage + ;; + esac +} + +# Iterate through the commandline options +while getopts s:i:g:n:b:t:-: gopts +do + case ${gopts} in + -) + case "${OPTARG}" in + clustertype=*) + CLUSTER_TYPE=${OPTARG#*=} + ;; + + cpureq=*) + CPU_REQ=${OPTARG#*=} + ;; + memreq=*) + MEM_REQ=${OPTARG#*=} + ;; + cpulim=*) + CPU_LIM=${OPTARG#*=} + ;; + memlim=*) + MEM_LIM=${OPTARG#*=} + ;; + envoptions=*) + JDK_JAVA_OPTIONS=${OPTARG#*=} + ;; + usertunables=*) + OPTIONS_VAR=${OPTARG#*=} + ;; + *) + esac + ;; + s) + BENCHMARK_SERVER="${OPTARG}" + ;; + i) + SERVER_INSTANCES="${OPTARG}" + ;; + g) + RENAISSANCE_IMAGE="${OPTARG}" + ;; + n) + NAMESPACE="${OPTARG}" + ;; + b) + BENCHMARKS="${OPTARG}" + ;; + t) + BENCHMARK_DURATION="${OPTARG}" + ;; + esac +done + +if [ -z "${CLUSTER_TYPE}" ]; then + echo "Do set the variable - CLUSTER_TYPE " + usage + exit 1 +fi + +# check memory limit for unit +if [ ! -z "${MEM_LIM}" ]; then + check_memory_unit ${MEM_LIM} +fi + +# check memory request for unit +if [ ! 
-z "${MEM_REQ}" ]; then + check_memory_unit ${MEM_REQ} +fi + +if [[ ${CLUSTER_TYPE} == "openshift" ]]; then + K_EXEC="oc" +elif [[ ${CLUSTER_TYPE} == "minikube" ]]; then + K_EXEC="kubectl" +fi +# Create multiple yamls based on instances and Update the template yamls with names and create multiple files +# input:quarkus-resteasy-hibernate , postgres and service-monitor yaml file +function createInstances() { + #Create the deployments and services + for(( inst=0; inst<"${SERVER_INSTANCES}"; inst++ )) + do + sed "s/name: ${APP_NAME}/name: ${APP_NAME}-${inst}/g" ${MANIFESTS_DIR}/service-monitor.yaml > ${MANIFESTS_DIR}/service-monitor-${inst}.yaml + sed -i "s/${APP_NAME}-app/${APP_NAME}-app-${inst}/g" ${MANIFESTS_DIR}/service-monitor-${inst}.yaml + sed -i "s/${APP_NAME}-port/${APP_NAME}-port-${inst}/g" ${MANIFESTS_DIR}/service-monitor-${inst}.yaml + ${K_EXEC} create -f ${MANIFESTS_DIR}/service-monitor-${inst}.yaml -n ${NAMESPACE} + done + + for(( inst=0; inst<"${SERVER_INSTANCES}"; inst++ )) + do + sed "s/${APP_NAME}-sample/${APP_NAME}-sample-${inst}/g" ${MANIFESTS_DIR}/renaissance.yaml > ${MANIFESTS_DIR}/renaissance-${inst}.yaml + sed -i "s|${BENCHMARK_IMAGE}|${RENAISSANCE_IMAGE}|g" ${MANIFESTS_DIR}/renaissance-${inst}.yaml + sed -i "s/${APP_NAME}-service/${APP_NAME}-service-${inst}/g" ${MANIFESTS_DIR}/renaissance-${inst}.yaml + sed -i "s/${APP_NAME}-app/${APP_NAME}-app-${inst}/g" ${MANIFESTS_DIR}/renaissance-${inst}.yaml + sed -i "s/${APP_NAME}-port/${APP_NAME}-port-${inst}/g" ${MANIFESTS_DIR}/renaissance-${inst}.yaml + + # Setting cpu/mem request limits + if [ ! -z ${MEM_REQ} ]; then + sed -i '/requests:/a \ \ \ \ \ \ \ \ \ \ memory: '${MEM_REQ}'' ${MANIFESTS_DIR}/renaissance-${inst}.yaml + fi + if [ ! -z ${CPU_REQ} ]; then + sed -i '/requests:/a \ \ \ \ \ \ \ \ \ \ cpu: '${CPU_REQ}'' ${MANIFESTS_DIR}/renaissance-${inst}.yaml + fi + if [ ! 
-z ${MEM_LIM} ]; then + sed -i '/limits:/a \ \ \ \ \ \ \ \ \ \ memory: '${MEM_LIM}'' ${MANIFESTS_DIR}/renaissance-${inst}.yaml + fi + if [ ! -z ${CPU_LIM} ]; then + sed -i '/limits:/a \ \ \ \ \ \ \ \ \ \ cpu: '${CPU_LIM}'' ${MANIFESTS_DIR}/renaissance-${inst}.yaml + fi + if [ ! -z ${BENCHMARKS} ]; then + sed -i "/env:/a \ \ \ \ \ \ \ \ \ \ \ \ value: \"${BENCHMARKS}\"" ${MANIFESTS_DIR}/renaissance-${inst}.yaml + sed -i '/env:/a \ \ \ \ \ \ \ \ \ \ - name: "BENCHMARK"' ${MANIFESTS_DIR}/renaissance-${inst}.yaml + fi + echo "MESSAGE" ${BENCHMARKS} + if [ ! -z ${BENCHMARK_DURATION} ]; then + sed -i "/env:/a \ \ \ \ \ \ \ \ \ \ \ \ value: \"${BENCHMARK_DURATION}\"" ${MANIFESTS_DIR}/renaissance-${inst}.yaml + sed -i '/env:/a \ \ \ \ \ \ \ \ \ \ - name: "TIME_LIMIT"' ${MANIFESTS_DIR}/renaissance-${inst}.yaml + fi + tunables_jvm_boolean=(TieredCompilation AllowParallelDefineClass AllowVectorizeOnDemand AlwaysCompileLoopMethods AlwaysPreTouch AlwaysTenure BackgroundCompilation DoEscapeAnalysis UseInlineCaches UseLoopPredicate UseStringDeduplication UseSuperWord UseTypeSpeculation) + tunables_jvm_values=(FreqInlineSize MaxInlineLevel MinInliningThreshold CompileThreshold CompileThresholdScaling ConcGCThreads InlineSmallCode LoopUnrollLimit LoopUnrollMin MinSurvivorRatio NewRatio TieredStopAtLevel) + user_options=$(echo ${OPTIONS_VAR} | tr ";" "\n") + + OPTIONS_VAR="" + for useroption in ${user_options} + do + OPTIONS_VAR="${OPTIONS_VAR} ${useroption}" + done + + for btunable in "${tunables_jvm_boolean[@]}" + do + if [ ! -z ${!btunable} ]; then + if [ ${!btunable} == "true" ]; then + OPTIONS_VAR="${OPTIONS_VAR} -XX:+${btunable}" + else + OPTIONS_VAR="${OPTIONS_VAR} -XX:-${btunable}" + fi + fi + done + + for jvtunable in "${tunables_jvm_values[@]}" + do + if [ ! -z ${!jvtunable} ]; then + OPTIONS_VAR="${OPTIONS_VAR} -XX:${jvtunable}=${!jvtunable}" + fi + done + + if [ ! 
-z "${OPTIONS_VAR}" ]; then + sed -i "s/\"-server\"/\"${OPTIONS_VAR}\"/" ${MANIFESTS_DIR}/renaissance-${inst}.yaml + fi + + if [ ! -z "${JDK_JAVA_OPTIONS}" ]; then + sed -i "/env:/a \ \ \ \ \ \ \ \ \ \ \ \ value: \"${JDK_JAVA_OPTIONS}\"" ${MANIFESTS_DIR}/renaissance-${inst}.yaml + sed -i '/env:/a \ \ \ \ \ \ \ \ \ \ - name: "JDK_JAVA_OPTIONS"' ${MANIFESTS_DIR}/renaissance-${inst}.yaml + fi + + + ${K_EXEC} create -f ${MANIFESTS_DIR}/renaissance-${inst}.yaml -n ${NAMESPACE} + #err_exit "Error: Issue in deploying ${APP_NAME}." >> ${LOGFILE} + + done + + #Wait till ${APP_NAME} starts + sleep 20 + + #Expose the services + if [[ ${CLUSTER_TYPE} == "openshift" ]]; then + SVC_LIST=($(${K_EXEC} get svc --namespace=${NAMESPACE} | grep "service" | grep "${APP_NAME}" | cut -d " " -f1)) + for sv in "${SVC_LIST[@]}" + do + ${K_EXEC} expose svc/${sv} --namespace=${NAMESPACE} + #err_exit " Error: Issue in exposing service" >> ${LOGFILE} + done + fi + +} + + +# Delete the renaissance and renaissance-database deployments,services and routes if it is already present +function stopAllInstances() { + ${RENAISSANCE_REPO}/renaissance-cleanup.sh -c ${CLUSTER_TYPE} -n ${NAMESPACE} >> ${LOGFILE} + sleep 30 + +} + +# Stop all renaissance related instances if there are any +stopAllInstances +# Deploying instances +createInstances ${SERVER_INSTANCES} diff --git a/renaissance/scripts/utils/common.sh b/renaissance/scripts/utils/common.sh new file mode 100755 index 00000000..a40ccd57 --- /dev/null +++ b/renaissance/scripts/utils/common.sh @@ -0,0 +1,94 @@ +#!/bin/bash +# +# Copyright (c) 2020, 2021 Red Hat, IBM Corporation and others. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# + +# Calculate average in MB +# input: Result directory +# output: Average in MB +function calcAvg_inMB() +{ + LOG=$1 + if [ -s ${LOG} ]; then + sed -i '/^$/d' ${LOG} + if [ -s ${LOG} ]; then + awk '{sum+=$1} END { print " Average =",sum/NR/1024/1024}' ${LOG} ; + fi + fi +} + +# Calculate average in percentage +# input: Result directory +# output: Average in percentage +function calcAvg_in_p() +{ + LOG=$1 + if [ -s ${LOG} ]; then + sed -i '/^$/d' ${LOG} + if [ -s ${LOG} ]; then + awk '{sum+=$1} END { print " % Average =",sum/NR*100}' ${LOG} ; + fi + fi +} + +# Calculate average +# input: Result directory +# output: Average +function calcAvg() +{ + LOG=$1 + if [ -s ${LOG} ]; then + sed -i '/^$/d' ${LOG} + if [ -s ${LOG} ]; then + awk '{sum+=$1} END { print " Average =",sum/NR}' ${LOG} ; + fi + fi +} + +#Calculate Median +# input: Result directory +# output: Median +function calcMedian() +{ + LOG=$1 + if [ -s ${LOG} ]; then + sed -i '/^$/d' ${LOG} + sort -n ${LOG} | awk ' { a[i++]=$1; } END { x=int((i+1)/2); if (x < (i+1)/2) print " Median =",(a[x-1]+a[x])/2; else print " Median =",a[x-1]; }' + fi +} + +# Calculate minimum +# input: Result directory +# output: Minimum value +function calcMin() +{ + LOG=$1 + if [ -s ${LOG} ]; then + sed -i '/^$/d' ${LOG} + sort -n ${LOG} | head -1 + fi +} + +# Calculate maximum +# input: Result directory +# output: Maximum value +function calcMax() { + LOG=$1 + if [ -s ${LOG} ]; then + sed -i '/^$/d' ${LOG} + sort -n ${LOG} | tail -1 + fi +}