Remove zrml-simple-disputes and migrate markets #2685

Workflow file for this run

name: Benchmark weights
on:
  workflow_dispatch:
  pull_request:
    types: [ labeled ]
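# The pull_request trigger only fires when a label is added; the job-level `if`
# below narrows it to the s:benchmark-required label. The workflow can also be
# started manually via workflow_dispatch.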
env:
  CARGO_TERM_COLOR: always
jobs:
  benchmark:
    name: Benchmark weights
    if: (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 's:benchmark-required')) || github.event_name == 'workflow_dispatch'
    runs-on: [self-hosted, runtime-benchmark]
    defaults:
      run:
        shell: bash
    steps:
      # Ensure that at most one benchmark workflow runs across all workflows
      - name: Turnstyle
        uses: softprops/turnstyle@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Checkout repository (PR)
        uses: actions/checkout@v2
        if: (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 's:benchmark-required'))
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - name: Checkout repository (branch)
        uses: actions/checkout@v2
        if: github.event_name == 'workflow_dispatch'
      - name: Install build tools
        run: ./scripts/init.sh
      - name: Cache dependencies
        uses: Swatinem/rust-cache@v1
      - name: Test runtime-benchmarks
        run: cargo test --release --features runtime-benchmarks
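      # The benchmarking CLI requires the runtime to be compiled with the
      # runtime-benchmarks feature, hence this dedicated release build.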
      - name: Build Zeitgeist chain with runtime-benchmarks feature
        run: cargo build --release --features runtime-benchmarks --bin zeitgeist
      - name: Run benchmarks
        id: run_benchmarks
        run: |
          # Load the benchmark configuration (pallet list, step and repeat counts)
          source ./scripts/benchmarks/configuration.sh
          echo "Fetching commit hash of HEAD"
          zg_cmt=$(git log -n 1 --format="%H")
          echo "Commit hash: ${zg_cmt}"
          # Takes variable name as parameter $1
          prepare_output() {
            # Add result box that contains contents of parameter $1
            local txt_proc="<details>
          <summary>
          Results
          </summary>
          ${!1}
          </details>"
            # Prepare output for github output format
            txt_proc="${txt_proc//'%'/'%25'}"
            txt_proc="${txt_proc//$'\n'/'%0A'}"
            txt_proc="${txt_proc//$'\r'/'%0D'}"
            eval "$1=\"${txt_proc//\"/\'}\""
          }
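          # Usage sketch (hypothetical values, not part of the workflow):
          #   MSG=$'first line\nsecond line'
          #   prepare_output MSG
          #   # MSG now holds the <details> box with newlines encoded as %0A,
          #   # ready to be emitted via ::set-output.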
          check_if_new_commits() {
            echo "Fetching latest repository state"
            git fetch
            if [ $? -ne 0 ]; then
              ERROR="ERROR: git fetch failed. Aborting benchmarks."
              prepare_output ERROR
              echo "::set-output name=ERROR::$ERROR"
              exit 0
            fi
            echo "Determining if the branch contains new updates."
            if [ "${{ github.event_name }}" == 'pull_request' ]; then
              local zg_cur_cmt=$(git log -n 1 --format="%H" origin/${{ github.event.pull_request.head.ref }})
            else
              local zg_cur_cmt=$(git log -n 1 --format="%H" origin/${GITHUB_REF##*/})
            fi
            if [ "$zg_cmt" != "$zg_cur_cmt" ]; then
              echo "${zg_cmt} != ${zg_cur_cmt}"
              ERROR="ERROR: Branch contains new commits. Aborting benchmarks."
              prepare_output ERROR
              echo "::set-output name=ERROR::$ERROR"
              exit 0
            fi
          }
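          # Bail out before running any benchmarks if the branch has already moved on.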
          check_if_new_commits
          # Execute benchmark. Note: It's not possible to use a matrix here in
          # combination with GitHub AE hosted runner (one instance per job)
          for pallet in "${ZEITGEIST_PALLETS[@]}"; do
            # Execute benchmark
            pallet_folder_name=${pallet//zrml_/}
            pallet_folder_name=${pallet_folder_name//_/-}
            command="./target/release/zeitgeist benchmark --chain=dev "
            command+="--steps=$ZEITGEIST_PALLETS_STEPS --repeat=$ZEITGEIST_PALLETS_RUNS "
            command+="--pallet=$pallet --extrinsic=* --execution=wasm --wasm-execution=compiled "
            command+="--heap-pages=4096 --template=./misc/weight_template.hbs "
            command+="--output=./zrml/$pallet_folder_name/src/weights.rs"
echo "Executing: $command"
# use temporary log to display contents in github workflow
$command > _temp.log 2> benchmark_errors.txt
if [ $? -ne 0 ]; then
# Prepare error for github output format
cat _temp.log
ERROR=$(<benchmark_errors.txt)
prepare_output ERROR
echo "::set-output name=ERROR::$ERROR"
exit 0
fi
cat _temp.log | tee --append benchmark_log.txt
check_if_new_commits
done
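          # Post-process the aggregated log for the PR comment: add a line break
          # after each "Writes = N" entry and soften the "========" separators.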
          BENCHMARK_LOG=$(sed -E "s/(Writes\s=\s[0-9]+)/\1\n/g" benchmark_log.txt | sed "s/========/--------/g")
          prepare_output BENCHMARK_LOG
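          # Expose the escaped log as a step output; later steps read it via
          # steps.run_benchmarks.outputs.RESULT. (::set-output has since been
          # deprecated in favour of writing to $GITHUB_OUTPUT.)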
echo "::set-output name=RESULT::$BENCHMARK_LOG"
- name: Format code
if: steps.run_benchmarks.outputs.ERROR == ''
run: cargo fmt
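      # Commit the regenerated weight files back to the branch; only
      # zrml/*/src/weights.rs is picked up, and git hooks are skipped (--no-verify).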
      - name: Commit updated weights
        uses: stefanzweifel/git-auto-commit-action@v4
        if: steps.run_benchmarks.outputs.ERROR == ''
        with:
          commit_message: Update weights
          commit_options: '--no-verify'
          file_pattern: zrml/*/src/weights.rs
          commit_user_name: Zeitgeist Benchmark Bot
          commit_user_email: zeitgeist-benchmark-bot@no-reply.zeitgeist.pm
          status_options: '--untracked-files=no'
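      # Label bookkeeping: the triggering s:benchmark-required label is removed
      # and replaced with s:benchmark-done or s:benchmark-aborted, depending on
      # whether the benchmark step recorded an error.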
      - name: Remove label s:benchmark-required
        if: github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 's:benchmark-required')
        uses: buildsville/add-remove-label@v1
        with:
          token: ${{secrets.GITHUB_TOKEN}}
          label: s:benchmark-required
          type: remove
      - name: Add label s:benchmark-done
        if: (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 's:benchmark-required')) && steps.run_benchmarks.outputs.ERROR == ''
        uses: buildsville/add-remove-label@v1
        with:
          token: ${{secrets.GITHUB_TOKEN}}
          label: s:benchmark-done
          type: add
      - name: Add label s:benchmark-aborted
        if: (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 's:benchmark-required')) && steps.run_benchmarks.outputs.ERROR != ''
        uses: buildsville/add-remove-label@v1
        with:
          token: ${{secrets.GITHUB_TOKEN}}
          label: s:benchmark-aborted
          type: add
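      # Report the outcome on the PR: the collapsible <details> block built by
      # prepare_output is embedded in the comment body.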
      - name: Comment PR (on success)
        uses: thollander/actions-comment-pull-request@v1.0.4
        if: (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 's:benchmark-required')) && steps.run_benchmarks.outputs.ERROR == ''
        with:
          message: 'Benchmark completed successfully.
            ${{ steps.run_benchmarks.outputs.RESULT }}'
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Comment PR (on failure)
        uses: thollander/actions-comment-pull-request@v1.0.4
        if: (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 's:benchmark-required')) && steps.run_benchmarks.outputs.ERROR != ''
        with:
          message: 'Benchmark aborted due to an error.
            ${{ steps.run_benchmarks.outputs.ERROR }}'
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
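      # The benchmark step exits 0 even on failure so that the labelling and PR
      # comment steps above still run; this final step fails the workflow run if
      # an error was recorded.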
      - name: Exit with error message
        if: steps.run_benchmarks.outputs.ERROR != ''
        run: |
          echo "${{ steps.run_benchmarks.outputs.ERROR }}"
          exit 1