diff --git a/benchmark/Dockerfile b/benchmark/Dockerfile
new file mode 100644
index 000000000..52ab7a92f
--- /dev/null
+++ b/benchmark/Dockerfile
@@ -0,0 +1,66 @@
+# Dacapo download
+FROM debian:bookworm-slim AS benchmarks
+RUN apt-get update \
+    && apt-get -y install wget unzip libc6 \
+    && apt-get -y clean \
+    && rm -rf /var/lib/apt/lists/*
+
+ARG DACAPO_VERSION=23.11-chopin
+# The data for the big benchmarks is removed to ensure the final docker image is not too big
+RUN wget -nv -O dacapo.zip https://download.dacapobench.org/chopin/dacapo-$DACAPO_VERSION.zip \
+    && mkdir /dacapo \
+    && unzip dacapo.zip -d /dacapo/ \
+    && rm -rf /dacapo/dacapo-$DACAPO_VERSION/dat/luindex \
+    && rm -rf /dacapo/dacapo-$DACAPO_VERSION/dat/lusearch \
+    && rm -rf /dacapo/dacapo-$DACAPO_VERSION/dat/graphchi \
+    && rm dacapo.zip
+
+# Download and install renaissance benchmark
+ARG RENAISSANCE_VERSION=0.16.0
+RUN mkdir /renaissance \
+    && wget -nv -O ./renaissance/renaissance-gpl.jar https://github.com/renaissance-benchmarks/renaissance/releases/download/v${RENAISSANCE_VERSION}/renaissance-gpl-${RENAISSANCE_VERSION}.jar
+
+FROM debian:bookworm-slim
+
+RUN apt-get update \
+    && apt-get -y install git curl wget procps gettext-base \
+    && apt-get -y clean \
+    && rm -rf /var/lib/apt/lists/*
+
+COPY --from=eclipse-temurin:8-jammy /opt/java/openjdk /usr/lib/jvm/8
+COPY --from=eclipse-temurin:11-jammy /opt/java/openjdk /usr/lib/jvm/11
+COPY --from=eclipse-temurin:17-jammy /opt/java/openjdk /usr/lib/jvm/17
+
+RUN rm -rf \
+    /usr/lib/jvm/*/man \
+    /usr/lib/jvm/*/src.zip \
+    /usr/lib/jvm/*/lib/src.zip \
+    /usr/lib/jvm/*/demo \
+    /usr/lib/jvm/*/sample
+
+ENV JAVA_8_HOME=/usr/lib/jvm/8
+ENV JAVA_11_HOME=/usr/lib/jvm/11
+ENV JAVA_17_HOME=/usr/lib/jvm/17
+ENV JAVA_HOME=${JAVA_8_HOME}
+ENV PATH=${PATH}:${JAVA_HOME}/bin
+
+ARG SIRUN_VERSION=0.1.11
+RUN wget -O sirun.tar.gz https://github.com/DataDog/sirun/releases/download/v$SIRUN_VERSION/sirun-v$SIRUN_VERSION-x86_64-unknown-linux-musl.tar.gz \
+    && 
tar -xzf sirun.tar.gz \ + && rm sirun.tar.gz \ + && mv sirun /usr/bin/sirun + +ARG K6_VERSION=0.45.1 +RUN wget -O k6.tar.gz https://github.com/grafana/k6/releases/download/v$K6_VERSION/k6-v$K6_VERSION-linux-amd64.tar.gz \ + && tar --strip-components=1 -xzf k6.tar.gz \ + && rm k6.tar.gz \ + && mv k6 /usr/bin/k6 + +RUN mkdir -p /app + +COPY --from=benchmarks /dacapo/ /app/ +ARG DACAPO_VERSION=23.11-chopin +ENV DACAPO=/app/dacapo-$DACAPO_VERSION.jar + +COPY --from=benchmarks /renaissance/ /app/ +ENV RENAISSANCE=/app/renaissance-gpl.jar diff --git a/benchmark/README.md b/benchmark/README.md new file mode 100644 index 000000000..30f3bbcf8 --- /dev/null +++ b/benchmark/README.md @@ -0,0 +1,29 @@ +# Benchmarks + +This directory contains different types of benchmarks. + +## Running Benchmarks via Docker + +Docker allows the execution of benchmarks without needing to install and configure your development environment. For example, package installation and installation of sirun are performed automatically. + +In order to run benchmarks using Docker, issue the following command from the `benchmark/` folder of this project: + +```sh +./run.sh +``` + +If you run into storage errors (e.g. running out of disk space), try removing all unused Docker containers, networks, and images with `docker system prune -af` before running the script again. Once finished, the reports will be available in the `benchmark/reports/` folder. Note that the script can take ~40 minutes to run. + +### Running specific benchmarks + +If you want to run only a specific category of benchmarks, you can do so via arguments: + +1. Run startup benchmarks +```sh +./run.sh startup [application]? +``` + +2. Run load benchmarks +```sh +./run.sh load [application]? +``` diff --git a/benchmark/benchmarks.sh b/benchmark/benchmarks.sh new file mode 100755 index 000000000..09e00b6fd --- /dev/null +++ b/benchmark/benchmarks.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -eu + +echo "Running benchmarks ..." 
+
+readonly SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)
+export PROFILER_DIR="${SCRIPT_DIR}/.."
+export REPORTS_DIR="${SCRIPT_DIR}/reports"
+export UTILS_DIR="${SCRIPT_DIR}/utils"
+export SHELL_UTILS_DIR="${UTILS_DIR}/shell"
+export K6_UTILS_DIR="${UTILS_DIR}/k6"
+export PROFILER="${SCRIPT_DIR}/profiler/libjavaProfiler.so"
+
+run_benchmarks() {
+    local type=$1
+    if [[ -d "${type}" ]] && [[ -f "${type}/run.sh" ]]; then
+        cd "${type}"
+        ./run.sh "$@"
+        cd "${SCRIPT_DIR}"
+    fi
+}
+
+# Find or rebuild profiler to be used in the benchmarks
+if [[ ! -f "${PROFILER}" ]]; then
+    mkdir -p "${SCRIPT_DIR}/profiler"
+    cd "${PROFILER_DIR}"
+    ARCH=$(uname -p)
+    if [[ "$ARCH" == "x86_64" ]]; then
+        ARCH="x64"
+    fi
+
+    readonly PROFILER_VERSION=$(./gradlew properties -q | grep "version:" | awk '{print $2}')
+    readonly PROFILER_COMPILED="${SCRIPT_DIR}/../ddprof-lib/build/lib/main/release/linux/${ARCH}/libjavaProfiler.so"
+    if [[ ! -f "${PROFILER_COMPILED}" ]]; then
+        echo "Profiler not found, starting gradle compile ..." 
+        ./gradlew assemble
+    fi
+    cp "${PROFILER_COMPILED}" "${PROFILER}"
+    cd "${SCRIPT_DIR}"
+fi
+
+if [[ "$#" == '0' ]]; then
+    for type in 'dacapo' 'renaissance'; do
+        run_benchmarks "$type"
+    done
+else
+    run_benchmarks "$@"
+fi
diff --git a/benchmark/dacapo/benchmark.json b/benchmark/dacapo/benchmark.json
new file mode 100644
index 000000000..d7d9cdad7
--- /dev/null
+++ b/benchmark/dacapo/benchmark.json
@@ -0,0 +1,21 @@
+{
+  "name": "dacapo_${BENCHMARK}",
+  "setup": "bash -c \"mkdir -p ${OUTPUT_DIR}/${VARIANT}\"",
+  "run": "bash -c \"java ${JAVA_OPTS} -jar ${DACAPO} --converge --scratch-directory=${OUTPUT_DIR}/${VARIANT}/scratch --latency-csv ${BENCHMARK} &> ${OUTPUT_DIR}/${VARIANT}/dacapo.log\"",
+  "timeout": 150,
+  "iterations": 1,
+  "variants": {
+    "baseline": {
+      "env": {
+        "VARIANT": "baseline",
+        "JAVA_OPTS": ""
+      }
+    },
+    "profiling": {
+      "env": {
+        "VARIANT": "profiling",
+        "JAVA_OPTS": "-agentpath:${PROFILER}=start,wall=10ms,file=${OUTPUT_DIR}/${VARIANT}/profiler.jfr"
+      }
+    }
+  }
+}
diff --git a/benchmark/dacapo/run.sh b/benchmark/dacapo/run.sh
new file mode 100755
index 000000000..ece44f9e5
--- /dev/null
+++ b/benchmark/dacapo/run.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+set -eu
+
+source "${UTILS_DIR}/update-java-version.sh" 11
+
+function message() {
+    echo "$(date +"%T"): $1"
+}
+
+run_benchmark() {
+    local type=$1
+
+    message "dacapo benchmark: ${type} started"
+
+    # export the benchmark
+    export BENCHMARK="${type}"
+
+    # create output folder for the test
+    export OUTPUT_DIR="${REPORTS_DIR}/dacapo/${type}"
+    mkdir -p "${OUTPUT_DIR}"
+
+    # substitute environment variables in the json file
+    benchmark=$(mktemp)
+    # shellcheck disable=SC2046
+    # shellcheck disable=SC2016
+    envsubst "$(printf '${%s} ' $(env | cut -d'=' -f1))" < benchmark.json > "${benchmark}"
+
+    # run the sirun test
+    sirun "${benchmark}" &>"${OUTPUT_DIR}/${type}.json"
+
+    message "dacapo benchmark: ${type} finished"
+}
+
+if [ "$#" == '2' ]; then
+    run_benchmark "$2"
+else
+    for benchmark in biojava 
tomcat ; do
+        run_benchmark "${benchmark}"
+    done
+fi
+
diff --git a/benchmark/renaissance/benchmark.json b/benchmark/renaissance/benchmark.json
new file mode 100644
index 000000000..77d92d604
--- /dev/null
+++ b/benchmark/renaissance/benchmark.json
@@ -0,0 +1,21 @@
+{
+  "name": "renaissance_${BENCHMARK}",
+  "setup": "bash -c \"mkdir -p ${OUTPUT_DIR}/${VARIANT}/scratch\"",
+  "run": "bash -c \"java ${JAVA_OPTS} -jar ${RENAISSANCE} --scratch-base=${OUTPUT_DIR}/${VARIANT}/scratch --json ${OUTPUT_DIR}/${VARIANT}/${BENCHMARK}.json ${BENCHMARK} \"",
+  "timeout": 150,
+  "iterations": 1,
+  "variants": {
+    "baseline": {
+      "env": {
+        "VARIANT": "baseline",
+        "JAVA_OPTS": ""
+      }
+    },
+    "profiling": {
+      "env": {
+        "VARIANT": "profiling",
+        "JAVA_OPTS": "-agentpath:${PROFILER}=start,wall=10ms,file=${OUTPUT_DIR}/${VARIANT}/profiler.jfr"
+      }
+    }
+  }
+}
diff --git a/benchmark/renaissance/run.sh b/benchmark/renaissance/run.sh
new file mode 100755
index 000000000..5f095d644
--- /dev/null
+++ b/benchmark/renaissance/run.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+set -eu
+
+source "${UTILS_DIR}/update-java-version.sh" 17
+
+function message() {
+    echo "$(date +"%T"): $1"
+}
+
+run_benchmark() {
+    local type=$1
+
+    message "renaissance benchmark: ${type} started"
+
+    # export the benchmark
+    export BENCHMARK="${type}"
+
+    # create output folder for the test
+    export OUTPUT_DIR="${REPORTS_DIR}/renaissance/${type}"
+    mkdir -p "${OUTPUT_DIR}"
+
+    # substitute environment variables in the json file
+    benchmark=$(mktemp)
+    # shellcheck disable=SC2016
+    envsubst "$(printf '${%s} ' $(env | cut -d'=' -f1))" < benchmark.json > "${benchmark}"
+
+    # run the sirun test
+    sirun "${benchmark}" &>"${OUTPUT_DIR}/${type}.json"
+
+    message "renaissance benchmark: ${type} finished"
+}
+
+if [ "$#" == '2' ]; then
+    run_benchmark "$2"
+else
+    for benchmark in akka-uct neo4j-analytics ; do
+        run_benchmark "${benchmark}"
+    done
+fi
+
diff --git a/benchmark/run.sh b/benchmark/run.sh
new file mode 100755
index 000000000..c738b42d9 
--- /dev/null +++ b/benchmark/run.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +set -eu + +readonly SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" +readonly INITIAL_DIR="$(pwd)" +export PROFILER="${SCRIPT_DIR}/profiler/libjavaProfiler.so" + +cd "${SCRIPT_DIR}" + +# Build container image +echo "Building base image ..." +docker build \ + -t java-profiler/benchmark \ + . + +# Find or rebuild profiler to be used in the benchmarks +if [[ ! -f "${PROFILER}" ]]; then + mkdir -p "${SCRIPT_DIR}/profiler" + cd "${SCRIPT_DIR}/.." + + ARCH=$(uname -p) + if [[ "$ARCH" == "x86_64" ]]; + then + ARCH="x64" + elif [[ "$ARCH" == "aarch64" ]]; + then + ARCH="arm64" + fi + + readonly PROFILER_VERSION=$(./gradlew properties -q | grep "version:" | awk '{print $2}') + readonly PROFILER_COMPILED="${SCRIPT_DIR}/../ddprof-lib/build/lib/main/release/linux/${ARCH}/libjavaProfiler.so" + if [ ! -f "${PROFILER_COMPILED}" ]; then + echo "Profiler not found, starting gradle compile ..." + ./gradlew assemble + fi + cp "${PROFILER_COMPILED}" "${PROFILER}" + cd "${SCRIPT_DIR}" +fi + +# Trigger benchmarks +docker run --rm \ + -v "${HOME}/.gradle":/home/benchmark/.gradle:delegated \ + -v "${PWD}/..":/profiler:delegated \ + -w /profiler/benchmark \ + -e GRADLE_OPTS="-Dorg.gradle.daemon=false" \ + --entrypoint=./benchmarks.sh \ + --name java-profiler-benchmark \ + --cap-add SYS_ADMIN \ + java-profiler/benchmark \ + "$@" + +cd "${INITIAL_DIR}" diff --git a/benchmark/utils/k6.js b/benchmark/utils/k6.js new file mode 100644 index 000000000..aa5147ae3 --- /dev/null +++ b/benchmark/utils/k6.js @@ -0,0 +1,21 @@ +import {check} from 'k6'; + +export function checkResponse(response) { + const checks = Array.prototype.slice.call(arguments, 1); + const reduced = checks.reduce((result, current) => Object.assign(result, current), {}); + check(response, reduced); +} + +export const isOk = { + 'is OK': r => r.status === 200 +}; + +export const isRedirect = { + 'is redirect': r => r.status >= 
300 && r.status < 400
+};
+
+export function bodyContains(text) {
+    return {
+        'body contains': r => r.body.includes(text)
+    }
+}
diff --git a/benchmark/utils/run-k6-load-test.sh b/benchmark/utils/run-k6-load-test.sh
new file mode 100755
index 000000000..d3415f54e
--- /dev/null
+++ b/benchmark/utils/run-k6-load-test.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+set -eu
+
+command=$1
+exit_code=0
+
+cleanup() {
+    # run the exit command
+    bash -c "${command}"
+    exit $exit_code
+}
+
+trap cleanup EXIT
+
+echo "Starting k6 load test, logs are recorded into ${LOGS_DIR}/k6.log..."
+
+# run the k6 benchmark and store the result as JSON
+k6 run k6.js --out "json=${OUTPUT_DIR}/k6_$(date +%s).json" > "${LOGS_DIR}/k6.log" 2>&1 || exit_code=$?
+# a non-zero k6 exit code is captured above and propagated by the EXIT trap
+
+echo "k6 load test done !!!"
diff --git a/benchmark/utils/run-on-server-ready.sh b/benchmark/utils/run-on-server-ready.sh
new file mode 100755
index 000000000..2aad5aa9f
--- /dev/null
+++ b/benchmark/utils/run-on-server-ready.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+set -eu
+
+url=$1
+command=$2
+# wait for an HTTP server to come up, then run the selected command once
+until [[ $(curl -fso /dev/null -w "%{http_code}" "${url}") == 200 ]]; do
+    sleep 1
+done
+
+bash -c "${command}"
diff --git a/benchmark/utils/run-sirun-benchmarks.sh b/benchmark/utils/run-sirun-benchmarks.sh
new file mode 100755
index 000000000..c0bc732dc
--- /dev/null
+++ b/benchmark/utils/run-sirun-benchmarks.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+set -eu
+
+function message() {
+    echo "$(date +"%T"): $1"
+}
+
+run_benchmark() {
+    local type=$1
+    local app=$2
+    if [[ -d "${app}" ]] && [[ -f "${app}/benchmark.json" ]]; then
+
+        message "${type} benchmark: ${app} started"
+        cd "${app}"
+
+        # create output folder for the test
+        export OUTPUT_DIR="${REPORTS_DIR}/${type}/${app}"
+        mkdir -p "${OUTPUT_DIR}"
+
+        # substitute environment variables in the json file
+        benchmark=$(mktemp)
+        # shellcheck disable=SC2046
+        # shellcheck disable=SC2016
+        envsubst 
"$(printf '${%s} ' $(env | cut -d'=' -f1))" < benchmark.json > "${benchmark}"
+
+        # run the sirun test
+        sirun "${benchmark}" &>"${OUTPUT_DIR}/${app}.json"
+
+        message "${type} benchmark: ${app} finished"
+
+        cd ..
+    fi
+}
+
+if [ "$#" == '2' ]; then
+    run_benchmark "$@"
+else
+    for folder in *; do
+        run_benchmark "$1" "${folder}"
+    done
+fi
diff --git a/benchmark/utils/update-java-version.sh b/benchmark/utils/update-java-version.sh
new file mode 100755
index 000000000..3d76603e0
--- /dev/null
+++ b/benchmark/utils/update-java-version.sh
@@ -0,0 +1,5 @@
+readonly target=$1
+readonly NEW_PATH=$(echo "${PATH}" | sed -e "s@/usr/lib/jvm/[[:digit:]]\+@/usr/lib/jvm/${target}@g")
+export PATH="${NEW_PATH}"
+
+java --version
diff --git a/ddprof-lib/benchmarks/build.gradle b/ddprof-lib/benchmarks/build.gradle
index c6bd1db5c..fd3ded88a 100644
--- a/ddprof-lib/benchmarks/build.gradle
+++ b/ddprof-lib/benchmarks/build.gradle
@@ -17,7 +17,10 @@ application {
 
 // Include the main library headers
 tasks.withType(CppCompile).configureEach {
+    dependsOn ':ddprof-lib:copyUpstreamFiles'
+
     includes file('../src/main/cpp').toString()
+    includes file('../src/main/cpp-external').toString()
 }
 
 // Add a task to run the benchmark