Merge 2a357d1 into e91d80e
AllanJeremy committed Jun 24, 2022
2 parents e91d80e + 2a357d1 commit 8711011
Showing 4 changed files with 1,213 additions and 12 deletions.
213 changes: 202 additions & 11 deletions .github/workflows/benchmark.yml
@@ -4,7 +4,6 @@ on:
push:
branches:
- master

paths-ignore:
- "docs/**"
- "papers/**"
@@ -13,12 +12,13 @@ on:
- "prototyping/**"

jobs:
benchmarks-run:
name: Run ${{ matrix.bench.title }}
windows:
name: Run ${{ matrix.bench.title }} (Windows ${{matrix.arch}})
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
os: [windows-latest]
arch: [Win32, x64]
bench:
- {
script: "run-benchmarks",
@@ -32,12 +32,25 @@

runs-on: ${{ matrix.os }}
steps:
- name: Checkout Luau
- name: Checkout Luau repository
uses: actions/checkout@v3

- name: Build Luau
run: make config=release luau luau-analyze
shell: bash # necessary for fail-fast
run: |
mkdir build && cd build
cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo
cmake --build . --target Luau.Repl.CLI --config RelWithDebInfo
cmake --build . --target Luau.Analyze.CLI --config RelWithDebInfo
- name: Move build files to root
run: |
move build/RelWithDebInfo/* .
- name: Check dir structure
run: |
ls build/RelWithDebInfo
ls
- uses: actions/setup-python@v3
with:
python-version: "3.9"
@@ -48,18 +61,94 @@ jobs:
python -m pip install requests
python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose
- name: Install valgrind
- name: Run benchmark
run: |
sudo apt-get install valgrind
python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt
- name: Checkout Benchmark Results repository
uses: actions/checkout@v3
with:
repository: ${{ matrix.benchResultsRepo.name }}
ref: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"

- name: Store ${{ matrix.bench.title }} result
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }} (Windows ${{matrix.arch}})
tool: "benchmarkluau"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data.json
alert-threshold: 150%
fail-threshold: 200%
fail-on-alert: true
comment-on-alert: true
comment-always: true
github-token: ${{ secrets.GITHUB_TOKEN }}

- name: Push benchmark results
if: github.event_name == 'push'
run: |
echo "Pushing benchmark results..."
cd gh-pages
git config user.name github-actions
git config user.email github@users.noreply.github.com
git add ./dev/bench/data.json
git commit -m "Add benchmark results for ${{ github.sha }}"
git push
cd ..
unix:
name: Run ${{ matrix.bench.title }} (${{ matrix.os}})
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest]
bench:
- {
script: "run-benchmarks",
timeout: 12,
title: "Luau Benchmarks",
cachegrindTitle: "Performance",
cachegrindIterCount: 20,
}
benchResultsRepo:
- { name: "luau-lang/benchmark-data", branch: "main" }

runs-on: ${{ matrix.os }}
steps:
- name: Checkout Luau repository
uses: actions/checkout@v3

- name: Build Luau
run: make config=release luau luau-analyze

- uses: actions/setup-python@v3
with:
python-version: "3.9"
architecture: "x64"

- name: Install python dependencies
run: |
python -m pip install requests
python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose
- name: Run benchmark
run: |
python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt
- name: Install valgrind
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get install valgrind
- name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
if: matrix.os == 'ubuntu-latest'
run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 | tee -a ${{ matrix.bench.script }}-output.txt

- name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
if: matrix.os == 'ubuntu-latest'
run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle }}" ${{ matrix.bench.cachegrindIterCount }} | tee -a ${{ matrix.bench.script }}-output.txt

- name: Checkout Benchmark Results repository
@@ -78,12 +167,14 @@ jobs:
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data.json
alert-threshold: 150%
fail-threshold: 1000%
fail-on-alert: false
fail-threshold: 200%
fail-on-alert: true
comment-on-alert: true
comment-always: true
github-token: ${{ secrets.GITHUB_TOKEN }}

- name: Store ${{ matrix.bench.title }} result
- name: Store ${{ matrix.bench.title }} result (CacheGrind)
if: matrix.os == 'ubuntu-latest'
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }} (CacheGrind)
@@ -97,7 +188,107 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}

- name: Push benchmark results
if: github.event_name == 'push'
run: |
echo "Pushing benchmark results..."
cd gh-pages
git config user.name github-actions
git config user.email github@users.noreply.github.com
git add ./dev/bench/data.json
git commit -m "Add benchmark results for ${{ github.sha }}"
git push
cd ..
static-analysis:
name: Run ${{ matrix.bench.title }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
engine:
- { channel: stable, version: latest }
bench:
- {
script: "run-analyze",
timeout: 12,
title: "Luau Analyze",
cachegrindTitle: "Performance",
cachegrindIterCount: 20,
}
benchResultsRepo:
- { name: "luau-lang/benchmark-data", branch: "main" }
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
with:
token: "${{ secrets.BENCH_GITHUB_TOKEN }}"

- name: Build Luau
run: make config=release luau luau-analyze

- uses: actions/setup-python@v4
with:
python-version: "3.9"
architecture: "x64"

- name: Install python dependencies
run: |
sudo pip install requests numpy scipy matplotlib ipython jupyter pandas sympy nose
- name: Install valgrind
run: |
sudo apt-get install valgrind
- name: Run Luau Analyze on static file
run: sudo python ./bench/measure_time.py ./build/release/luau-analyze bench/static_analysis/LuauPolyfillMap.lua | tee ${{ matrix.bench.script }}-output.txt

- name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
run: sudo ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}Cold" 1 ./build/release/luau-analyze bench/static_analysis/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt

- name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle}}" 1 ./build/release/luau-analyze bench/static_analysis/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt

- name: Checkout Benchmark Results repository
uses: actions/checkout@v3
with:
repository: ${{ matrix.benchResultsRepo.name }}
ref: ${{ matrix.benchResultsRepo.branch }}
token: ${{ secrets.BENCH_GITHUB_TOKEN }}
path: "./gh-pages"

- name: Store ${{ matrix.bench.title }} result
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }}
tool: "benchmarkluau"

gh-pages-branch: "main"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data.json
alert-threshold: 150%
fail-threshold: 200%
fail-on-alert: true
comment-on-alert: true
comment-always: true
github-token: ${{ secrets.GITHUB_TOKEN }}

- name: Store ${{ matrix.bench.title }} result (CacheGrind)
uses: Roblox/rhysd-github-action-benchmark@v-luau
with:
name: ${{ matrix.bench.title }}
tool: "roblox"
gh-pages-branch: "main"
output-file-path: ./${{ matrix.bench.script }}-output.txt
external-data-json-path: ./gh-pages/dev/bench/data.json
alert-threshold: 150%
fail-threshold: 200%
fail-on-alert: true
comment-on-alert: true
comment-always: true
github-token: ${{ secrets.GITHUB_TOKEN }}

- name: Push benchmark results
if: github.event_name == 'push'
run: |
echo "Pushing benchmark results..."
cd gh-pages
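For reference, the ${{ matrix.bench.script }}-output.txt files that the Store steps feed to Roblox/rhysd-github-action-benchmark are plain text, and the result lines produced by bench/measure_time.py below take the shape "SUCCESS: <name> : <mean>ms +/- <error>% on luau". A minimal sketch of pulling those values back out of such a file (purely illustrative; this helper is not part of the commit, and the real parsing happens inside the benchmark action):

import re

# Matches result lines such as: "SUCCESS: duration : 12.34ms +/- 0.56% on luau"
RESULT_LINE = re.compile(r"SUCCESS: (?P<name>.+?) : (?P<mean>[\d.]+)ms \+/- (?P<err>[\d.]+)% on luau")

def parse_results(path):
    # Collect (name, mean_ms, error) tuples from a tee'd benchmark output file
    results = []
    with open(path) as f:
        for line in f:
            match = RESULT_LINE.search(line)
            if match:
                results.append((match.group("name"), float(match.group("mean")), float(match.group("err"))))
    return results

# Example: parse_results("run-benchmarks-output.txt")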
42 changes: 42 additions & 0 deletions bench/measure_time.py
@@ -0,0 +1,42 @@
import os, sys, time, numpy

try:
    from scipy import stats
except ModuleNotFoundError:
    print("Warning: scipy package is not installed, confidence values will not be available")
    stats = None

duration_list = []

DEFAULT_CYCLES_TO_RUN = 100
cycles_to_run = DEFAULT_CYCLES_TO_RUN

try:
    cycles_to_run = int(sys.argv[3])
except IndexError:
    pass
except (ValueError, TypeError):
    cycles_to_run = DEFAULT_CYCLES_TO_RUN
    print("Error: Cycles to run argument must be an integer. Using default value of {}".format(DEFAULT_CYCLES_TO_RUN))

# numpy complains if we provide fewer than 3 samples, so run at least 3 cycles
cycles_to_run = cycles_to_run if cycles_to_run > 2 else 3

# The command to measure: the target binary (argv[1]) plus its input file (argv[2]), when given
command = " ".join(sys.argv[1:3])

for i in range(cycles_to_run):
    start = time.perf_counter()

    # Run the command we want to measure
    os.system(command)

    end = time.perf_counter()

    duration_ms = (end - start) * 1000
    duration_list.append(duration_ms)

# Stats
mean = numpy.mean(duration_list)

if stats is not None:
    std_err = stats.sem(duration_list)
else:
    # Fall back to numpy when scipy is unavailable (ddof=1 matches scipy.stats.sem)
    std_err = numpy.std(duration_list, ddof=1) / numpy.sqrt(len(duration_list))

print("SUCCESS: {} : {:.2f}ms +/- {:.2f}% on luau ".format('duration', mean, std_err))