[CI] Remove benchmark workflow, but run it in dedicated server (#159)
* remove benchmark workflow

* .
yaoyaoding committed Apr 5, 2023
1 parent dfa638e commit 916feb4
Showing 4 changed files with 103 additions and 68 deletions.
66 changes: 0 additions & 66 deletions .github/workflows/benchmark.yaml

This file was deleted.

16 changes: 16 additions & 0 deletions scripts/bench/README.md
@@ -0,0 +1,16 @@
# Benchmark Server

This script benchmarks the performance of hidet: it runs repeatedly and posts each report as a GitHub comment on a designated issue.

## Usage

```bash
# install the GitHub CLI
sudo apt install -y gh
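# authenticate gh so it can post issue comments
# (one-time setup; assumes an interactive login is possible on the server)
gh auth login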
# clone the repo
git clone git@github.com:hidet-org/hidet
# cd into the repo
cd hidet
# run the daemon script; you can specify the issue to send the report to
python scripts/bench/run.py [--issue-id <issue-id>]
```
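
The script loops forever, so on a dedicated server you will likely want it to survive the current shell session. A minimal sketch, assuming logging to a local file is acceptable (the log file name is arbitrary; issue 154 is the default in `run.py`):

```bash
# run the daemon in the background, detached from the current shell
nohup python scripts/bench/run.py --issue-id 154 > bench_daemon.log 2>&1 &
```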
13 changes: 11 additions & 2 deletions scripts/ci/benchmark.py → scripts/bench/benchmark.py
@@ -14,7 +14,8 @@


parser = argparse.ArgumentParser('Benchmark hidet performance.')
parser.add_argument('--git-commit', default=None, type=str, help='Git commit hash.')
parser.add_argument('--git-prev-commit', default=None, type=str, help='Previous git commit hash.')
parser.add_argument('--git-commit', type=str, help='Git commit hash.')
parser.add_argument('--space', default=0, type=int, help='Search space of hidet.')
parser.add_argument('--report', default='./report.txt', type=str, help='Report file path.')

@@ -33,12 +34,20 @@ def info(args) -> str:
    envs = [
        '# {}'.format(datetime.datetime.now(pytz.timezone('US/Eastern')).strftime('%Y-%m-%d')),
        '- Hidet version: {}'.format(hidet.__version__),
        '- Git commit: {}'.format(args.git_commit),
        '- PyTorch version: {}'.format(torch.__version__),
        '- OS: {}'.format(distro.name(pretty=True)),
        '- GPU: {}'.format(cudart.cudaGetDeviceProperties(0)[1].name.decode('utf-8')),
        '- GPU driver: {} ({})'.format(nvidia_gpu_driver(), nvidia_cuda_version()),
    ]
    if args.git_prev_commit and args.git_commit:
        envs += [
            '- Git diff: {}'.format(args.git_prev_commit + '...' + args.git_commit),
        ]
    else:
        envs += [
            '- Git commit: {}'.format(args.git_commit),
        ]
    envs.append('')
    return '\n'.join(envs)


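With both `--git-prev-commit` and `--git-commit` supplied, `info()` now reports a compare range instead of a single commit. A sketch of the header this produces (the date, version strings, hardware, and hashes below are all hypothetical):

```
# 2023-04-05
- Hidet version: 0.2.3
- PyTorch version: 2.0.0
- OS: Ubuntu 22.04.2 LTS
- GPU: NVIDIA GeForce RTX 3090
- GPU driver: 525.105.17 (12.1)
- Git diff: abc1234...def5678
```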
76 changes: 76 additions & 0 deletions scripts/bench/run.py
@@ -0,0 +1,76 @@
import os
import time
import argparse
import subprocess

parser = argparse.ArgumentParser('Benchmark performance.')
parser.add_argument('--issue-id', type=int, default=154, help='Issue id to send the benchmark result to.')

def install_dependencies():
    subprocess.run(['pip', 'install', '-r', 'requirements.txt'], check=True)
    subprocess.run(['pip', 'install', '-r', 'requirements-dev.txt'], check=True)

def pull_repo():
    subprocess.run(['git', 'pull'], check=True)


def reinstall_hidet():
    subprocess.run(['pip', 'uninstall', 'hidet', '-y'], check=True)
    subprocess.run(['mkdir', '-p', 'build'], check=True)
    # the glob must be expanded by a shell; with an argv list, rm would look
    # for a file literally named 'build/*'
    subprocess.run('rm -rf build/*', shell=True, check=True)
    subprocess.run(['cmake', '-S', '.', '-B', 'build'], check=True)
    subprocess.run(['pip', 'install', '-e', '.'], check=True)


def run_bench_script(report_file):
    current_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('utf-8').strip()
    command = 'python scripts/bench/benchmark.py --git-commit {commit} --report {report_file}'.format(
        commit=current_commit, report_file=report_file
    )

    if os.path.exists('scripts/bench/prev_commit.txt'):
        with open('scripts/bench/prev_commit.txt', 'r') as f:
            prev_commit = f.readline().strip()
        command += ' --git-prev-commit {}'.format(prev_commit)

    # split() is safe here: commit hashes and these paths contain no whitespace
    subprocess.run(command.split(), check=True)

    # remember this commit so the next run can report a compare range
    with open('scripts/bench/prev_commit.txt', 'w') as f:
        f.write(current_commit)

def send_report(issue_id, result_file):
    command = 'gh issue comment {issue_id} -F {result_file} -R hidet-org/hidet'.format(
        issue_id=issue_id, result_file=result_file
    )
    subprocess.run(command.split(), check=True)


def main():
    args = parser.parse_args()
    if not os.path.exists('./scripts/bench/benchmark.py'):
        raise RuntimeError('Please run this script from the root directory of the repository.')

    install_dependencies()

    while True:
        t1 = time.time()
        try:
            report_file = './scripts/bench/report.txt'
            pull_repo()
            reinstall_hidet()
            run_bench_script(report_file)
            send_report(args.issue_id, report_file)
        except Exception as e:
            print('Error: {}'.format(e))
            time.sleep(60 * 60)  # wait an hour, then retry
        else:
            # run the benchmark once every day
            t2 = time.time()
            print('Elapsed time: {} seconds'.format(t2 - t1))
            if t2 - t1 < 60 * 60 * 24:
                time.sleep(60 * 60 * 24 - (t2 - t1))


if __name__ == '__main__':
main()
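
`run_bench_script` assembles exactly this invocation; for a one-off comparison without the daemon, the benchmark can also be launched by hand (the commit hashes below are hypothetical):

```bash
python scripts/bench/benchmark.py --git-prev-commit abc1234 --git-commit def5678 --space 0 --report ./report.txt
```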
