Merge 69ac887 into b066e4c
AllanJeremy authored Jun 14, 2022
2 parents b066e4c + 69ac887 commit 3de6b4d
Showing 7 changed files with 1,434 additions and 0 deletions.
100 changes: 100 additions & 0 deletions .github/workflows/benchmark-cachegrind.yml
@@ -0,0 +1,100 @@
name: Luau Benchmarks (CacheGrind)

on:
  pull_request:
    paths-ignore:
      - "docs/**"
      - "papers/**"
      - "rfcs/**"
      - "*.md"
      - "prototyping/**"

  push:
    branches:
      - master
    paths-ignore:
      - "docs/**"
      - "papers/**"
      - "rfcs/**"
      - "*.md"
      - "prototyping/**"

jobs:
  benchmarks-run:
    name: Run ${{ matrix.bench.title }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        engine:
          - { channel: stable, version: latest }
        bench:
          - {
              script: "run-benchmarks",
              timeout: 12,
              title: "Luau Benchmarks (CacheGrind)",
              cachegrindTitle: "Performance",
              cachegrindIterCount: 20,
            }
        benchResultsRepo:
          - { name: "luau-lang/benchmark-data", branch: "main" }

    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout Luau
        uses: actions/checkout@v3

      - name: Build Luau
        run: make config=release luau luau-analyze

      - uses: actions/setup-python@v3
        with:
          python-version: "3.9"
          architecture: "x64"

      - name: Install python dependencies
        run: |
          python -m pip install requests
          python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose

      - name: Install valgrind
        run: |
          sudo apt-get install valgrind

      - name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle }}Cold" 1 | tee ${{ matrix.bench.script }}-output.txt

      - name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/bench.py "${{ matrix.bench.cachegrindTitle }}" ${{ matrix.bench.cachegrindIterCount }} | tee -a ${{ matrix.bench.script }}-output.txt

      - name: Checkout Benchmark Results repository
        uses: actions/checkout@v3
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          ref: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"

      - name: Store ${{ matrix.bench.title }} result
        uses: Roblox/rhysd-github-action-benchmark@v-luau
        with:
          name: ${{ matrix.bench.title }}
          tool: "roblox"
          output-file-path: ./${{ matrix.bench.script }}-output.txt
          external-data-json-path: ./gh-pages/dev/bench/data.json
          alert-threshold: 150%
          fail-threshold: 200%
          fail-on-alert: true
          comment-on-alert: true
          comment-always: true
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Push benchmark results
        if: github.event_name == 'push'
        run: |
          echo "Pushing benchmark results..."
          cd gh-pages
          git config user.name github-actions
          git config user.email github@users.noreply.github.com
          git add ./dev/bench/data.json
          git commit -m "Add benchmark results for ${{ github.sha }}"
          git push
          cd ..
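
Note: the "Store … result" step feeds run-benchmarks-output.txt to the benchmark action's "roblox" parser. Going by the print format in bench/measure_time.py later in this diff, the output file presumably carries result lines of roughly this shape (name and values illustrative):

  SUCCESS: duration : 12.34ms +/- 1.20% on luau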
93 changes: 93 additions & 0 deletions .github/workflows/benchmark.yml
@@ -0,0 +1,93 @@
name: Luau Benchmarks

on:
  pull_request:
    paths-ignore:
      - "docs/**"
      - "papers/**"
      - "rfcs/**"
      - "*.md"
      - "prototyping/**"
  push:
    branches:
      - master
    paths-ignore:
      - "docs/**"
      - "papers/**"
      - "rfcs/**"
      - "*.md"
      - "prototyping/**"

jobs:
  benchmarks-run:
    name: Run ${{ matrix.bench.title }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        engine:
          - { channel: stable, version: latest }
        bench:
          - {
              script: "run-benchmarks",
              timeout: 12,
              title: "Luau Benchmarks",
              cachegrindTitle: "Performance",
              cachegrindIterCount: 20,
            }
        benchResultsRepo:
          - { name: "luau-lang/benchmark-data", branch: "main" }

    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout Luau repository
        uses: actions/checkout@v3

      - name: Build Luau
        run: make config=release luau luau-analyze

      - uses: actions/setup-python@v3
        with:
          python-version: "3.9"
          architecture: "x64"

      - name: Install python dependencies
        run: |
          python -m pip install requests
          python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose

      - name: Run benchmark
        run: |
          python bench/bench.py | tee ${{ matrix.bench.script }}-output.txt

      - name: Checkout Benchmark Results repository
        uses: actions/checkout@v3
        with:
          repository: ${{ matrix.benchResultsRepo.name }}
          ref: ${{ matrix.benchResultsRepo.branch }}
          token: ${{ secrets.BENCH_GITHUB_TOKEN }}
          path: "./gh-pages"

      - name: Store ${{ matrix.bench.title }} result
        uses: Roblox/rhysd-github-action-benchmark@v-luau
        with:
          name: ${{ matrix.bench.title }}
          tool: "benchmarkluau"
          output-file-path: ./${{ matrix.bench.script }}-output.txt
          external-data-json-path: ./gh-pages/dev/bench/data.json
          alert-threshold: 150%
          fail-threshold: 200%
          fail-on-alert: true
          comment-on-alert: true
          comment-always: true
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Push benchmark results
        if: github.event_name == 'push'
        run: |
          echo "Pushing benchmark results..."
          cd gh-pages
          git config user.name github-actions
          git config user.email github@users.noreply.github.com
          git add ./dev/bench/data.json
          git commit -m "Add benchmark results for ${{ github.sha }}"
          git push
          cd ..
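
The "Build Luau" and "Run benchmark" steps can be reproduced locally with the same two commands (run-benchmarks-output.txt is the matrix script value substituted by hand; assumes the Python dependencies above are installed):

  make config=release luau luau-analyze
  python bench/bench.py | tee run-benchmarks-output.txt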
28 changes: 28 additions & 0 deletions .github/workflows/setup-luau.yml
@@ -0,0 +1,28 @@
name: Setup Luau

on:
  workflow_call:
    inputs:
      runsOn:
        required: true
        type: string

jobs:
  setup-luau:
    name: Setup Luau
    runs-on: ${{ inputs.runsOn }}
    steps:
      - uses: actions/checkout@v3

      - name: Build Luau
        run: make config=release luau luau-analyze

      - uses: actions/setup-python@v3
        with:
          python-version: "3.9"
          architecture: "x64"

      - name: Install python dependencies
        run: |
          python -m pip install requests
          python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose
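
Since this workflow declares only a workflow_call trigger, it never runs on its own; a calling workflow would invoke it roughly like this (the caller job name here is illustrative, not part of this diff):

  jobs:
    setup:
      uses: ./.github/workflows/setup-luau.yml
      with:
        runsOn: ubuntu-latest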
99 changes: 99 additions & 0 deletions .github/workflows/static-analysis.yml
@@ -0,0 +1,99 @@
name: Luau static file analysis

on:
  push:
    branches:
      - "feat/add-static-file-luau-analyze"
  pull_request:
    paths-ignore:
      - "docs/**"
      - "papers/**"
      - "rfcs/**"
      - "*.md"
      - "prototyping/**"

jobs:
  benchmarks-run:
    name: Run ${{ matrix.bench.title }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        engine:
          - { channel: stable, version: latest }
        bench:
          - {
              script: "run-analyze",
              timeout: 12,
              title: "Luau Analyze",
              cachegrindTitle: "Performance",
              cachegrindIterCount: 20,
            }
        benchResultsRepo:
          - { name: "AllanJeremy/luau-benchmark-results", branch: "main" }
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
        with:
          token: "${{ secrets.BENCH_GITHUB_TOKEN }}"

      - name: Build Luau
        run: make config=release luau luau-analyze

      - uses: actions/setup-python@v4
        with:
          python-version: "3.9"
          architecture: "x64"

      - name: Install python dependencies
        run: |
          pip install requests numpy scipy matplotlib ipython jupyter pandas sympy nose

      - name: Install valgrind
        run: |
          sudo apt-get install valgrind

      # - name: Run Luau Analyze on static file
      #   run: python ./bench/measure_time.py ./build/release/luau-analyze bench/static_analysis/LuauPolyfillMap.lua | tee ${{ matrix.bench.script }}-output.txt

      - name: Run ${{ matrix.bench.title }} (Cold Cachegrind)
        run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle }}Cold" 1 ./build/release/luau-analyze bench/static_analysis/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt

      # - name: Run ${{ matrix.bench.title }} (Warm Cachegrind)
      #   run: sudo bash ./scripts/run-with-cachegrind.sh python ./bench/measure_time.py "${{ matrix.bench.cachegrindTitle }}" 1 ./build/release/luau-analyze bench/static_analysis/LuauPolyfillMap.lua | tee -a ${{ matrix.bench.script }}-output.txt

      # - name: Checkout Benchmark Results repository
      #   uses: actions/checkout@v3
      #   with:
      #     repository: ${{ matrix.benchResultsRepo.name }}
      #     ref: ${{ matrix.benchResultsRepo.branch }}
      #     token: ${{ secrets.BENCH_GITHUB_TOKEN }}
      #     path: "./gh-pages"

      # - name: Store ${{ matrix.bench.title }} result
      #   uses: Roblox/rhysd-github-action-benchmark@v-luau
      #   with:
      #     name: ${{ matrix.bench.title }}
      #     tool: "benchmarkluau"

      #     gh-pages-branch: "main"
      #     output-file-path: ./${{ matrix.bench.script }}-output.txt
      #     external-data-json-path: ./gh-pages/dev/bench/data.json
      #     alert-threshold: 150%
      #     fail-threshold: 200%
      #     fail-on-alert: true
      #     comment-on-alert: true
      #     comment-always: true
      #     github-token: ${{ secrets.GITHUB_TOKEN }}

      # - name: Push benchmark results
      #   if: github.event_name == 'push'
      #   run: |
      #     echo "Pushing benchmark results..."
      #     cd gh-pages
      #     git config user.name github-actions
      #     git config user.email github@users.noreply.github.com
      #     git add ./dev/bench/data.json
      #     git commit -m "Add benchmark results for ${{ github.sha }}"
      #     git push
      #     cd ..
44 changes: 44 additions & 0 deletions bench/measure_time.py
@@ -0,0 +1,44 @@
import os, sys, time, numpy

try:
    from scipy import stats
except ModuleNotFoundError:
    print("Warning: scipy package is not installed, confidence values will not be available")
    stats = None


duration_list = []

DEFAULT_CYCLES_TO_RUN = 100
cycles_to_run = DEFAULT_CYCLES_TO_RUN

try:
    cycles_to_run = int(sys.argv[3]) if sys.argv[3] else DEFAULT_CYCLES_TO_RUN
except IndexError:
    pass
except (ValueError, TypeError):
    cycles_to_run = DEFAULT_CYCLES_TO_RUN
    print("Error: Cycles to run argument must be an integer. Using default value of {}".format(DEFAULT_CYCLES_TO_RUN))

# stats.sem needs more than one sample ~ default to 3 whenever a lower value is provided
cycles_to_run = cycles_to_run if cycles_to_run > 2 else 3

# range(1, cycles_to_run) in the original ran one cycle too few
for i in range(cycles_to_run):
    start = time.perf_counter()

    # Run the command under test; note that only argv[1] is executed, so a
    # multi-word command must be passed as a single (quoted) argument
    os.system(sys.argv[1])

    end = time.perf_counter()

    duration_ms = (end - start) * 1000
    duration_list.append(duration_ms)

# Stats (distinct name so we don't shadow anything imported above)
mean_duration = numpy.mean(duration_list)

# Standard error of the mean as a percentage of the mean, so the "+/- {:.2f}%"
# in the output line is accurate; falls back to 0 when scipy is unavailable
std_err_pct = (stats.sem(duration_list) / mean_duration) * 100 if stats else 0.0

print("SUCCESS: {} : {:.2f}ms +/- {:.2f}% on luau".format("duration", mean_duration, std_err_pct))
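
Standalone usage, mirroring the commented-out step in static-analysis.yml above. Because the script executes only its first argument, the quoted form below is an assumption about the intended invocation; it runs the default 100 timing cycles:

  python ./bench/measure_time.py "./build/release/luau-analyze bench/static_analysis/LuauPolyfillMap.lua"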
