Benchmarking - handle all NaN signal for WFDB write_waveform format #37

Workflow file for this run

name: Benchmark new formats

on:
  pull_request:
    paths:
      - 'waveform_benchmark/formats/**'

jobs:
  benchmark-new-formats:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10"]
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}
      - name: Setup Java
        uses: actions/setup-java@v3
        with:
          java-version: '11'
          distribution: 'adopt'
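      # Note (editorial suggestion, not part of the original run): 'adopt'
      # (AdoptOpenJDK) is a legacy alias in setup-java; the project moved to
      # Eclipse Temurin, so a drop-in update would be:
      #   distribution: 'temurin'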
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install flake8 pytest
          python -m pip install -r requirements.txt
      - name: Discover JVM Path
        run: |
          JAVA_HOME=$(dirname "$(dirname "$(readlink -f "$(which java)")")")
          echo "JAVA_HOME: $JAVA_HOME"
          # Check the usual libjvm.so locations; this step only prints the
          # path, it does not export it to later steps.
          if [ -f "$JAVA_HOME/lib/server/libjvm.so" ]; then
            echo "JPY_JVM_DLL=$JAVA_HOME/lib/server/libjvm.so"
          elif [ -f "$JAVA_HOME/jre/lib/server/libjvm.so" ]; then
            echo "JPY_JVM_DLL=$JAVA_HOME/jre/lib/server/libjvm.so"
          elif [ -f "$JAVA_HOME/lib/client/libjvm.so" ]; then
            echo "JPY_JVM_DLL=$JAVA_HOME/lib/client/libjvm.so"
          else
            echo "libjvm.so not found in usual locations"
          fi
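      # A minimal sketch (not part of the original workflow): to make the
      # discovered path available to later steps, the echo above could write
      # to the step environment file instead:
      #   echo "JPY_JVM_DLL=$JAVA_HOME/lib/server/libjvm.so" >> "$GITHUB_ENV"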
      - name: Copy .env.example to .env
        run: cp .env.example .env
      - name: Update .env with JVM DLL Path
        run: |
          echo "export JPY_JVM_DLL='/opt/hostedtoolcache/Java_Adopt_jdk/11.0.23-9/x64/lib/server/libjvm.so'" >> .env
          cat .env  # Display the .env file to verify the change
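      # Note: the line above pins the libjvm.so location for the setup-java
      # 'adopt' JDK 11 toolcache on the hosted runner; if the runner image or
      # JDK version changes, this path must be updated to match (the discovery
      # step above prints the current location).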
      - name: Find new or modified format files
        id: find-formats
        run: |
          git fetch origin main
          FILES=$(git diff --diff-filter=AM --name-only origin/main...HEAD -- 'waveform_benchmark/formats/*.py')
          echo "$FILES" > format_files.txt
          cat format_files.txt
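      # Expected contents of format_files.txt: newline-separated repo paths
      # from git diff, e.g. (hypothetical file name, for illustration only):
      #   waveform_benchmark/formats/example_format.py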
      - name: Run benchmark tests on new or modified format files
        run: |
          FORMAT_FILES=$(cat format_files.txt)
          if [ -z "$FORMAT_FILES" ]; then
            echo "No format files found to process."
            exit 0
          fi
          # git diff output is newline-separated, so split on newlines
          # (a space-delimited read would only capture the first file).
          readarray -t FILES <<< "$FORMAT_FILES"
          RECORD_NAME="./data/waveforms/mimic_iv/waves/p100/p10079700/85594648/85594648"
          for file in "${FILES[@]}"
          do
            # Skip the shared base module
            if [[ "$file" == *"base.py" ]]; then
              echo "Skipping benchmark for $file"
              continue
            fi
            # Convert the file path to a dotted module path
            MODULE_PATH=$(echo "$file" | sed 's/\//./g; s/\.py$//')
            # Find and list all classes defined in the module itself,
            # excluding Base* classes
            echo "Running benchmarks for classes in $MODULE_PATH"
            CLASSES=$(python3 -c "import inspect; from importlib import import_module; mod = import_module('$MODULE_PATH'); print(' '.join(cls for cls, obj in inspect.getmembers(mod, inspect.isclass) if obj.__module__ == mod.__name__ and not cls.startswith('Base')))")
            IFS=' ' read -ra CLS <<< "$CLASSES"
            for cls in "${CLS[@]}"
            do
              CLASS_PATH="$MODULE_PATH.$cls"
              FILENAME="benchmark_results_${cls}.txt"
              echo "Benchmarking $CLASS_PATH"
              ./waveform_benchmark.py -r "$RECORD_NAME" -f "$CLASS_PATH" > "$FILENAME"
            done
          done
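      # For readability, the introspection one-liner above is equivalent to
      # this Python sketch (the module name is illustrative):
      #   import inspect
      #   from importlib import import_module
      #   mod = import_module('waveform_benchmark.formats.example_format')
      #   classes = [name for name, obj in inspect.getmembers(mod, inspect.isclass)
      #              if obj.__module__ == mod.__name__
      #              and not name.startswith('Base')]
      #   print(' '.join(classes))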
      - name: Concatenate results
        run: |
          cat benchmark_results_*.txt > final_results.txt
          echo "Benchmark results:"
          cat final_results.txt
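      # Note (editorial): if no benchmark_results_*.txt files were produced,
      # the unexpanded glob makes the cat above fail and the step errors out;
      # a guarded sketch would be:
      #   shopt -s nullglob
      #   files=(benchmark_results_*.txt)
      #   if (( ${#files[@]} )); then cat "${files[@]}" > final_results.txt; else : > final_results.txt; fi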
      - name: Post results to pull request
        uses: actions/github-script@v7
        with:
          script: |
            // github-script runs JavaScript, so the empty-results check must
            // be written in JS rather than shell.
            const fs = require('fs');
            if (!fs.existsSync('final_results.txt') || fs.statSync('final_results.txt').size === 0) {
              console.log('No results to post.');
              return;
            }
            const resultContent = fs.readFileSync('final_results.txt', 'utf8');
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `Benchmark results:\n\`\`\`\n${resultContent}\n\`\`\``
            });