85 changes: 48 additions & 37 deletions benchmark/scripts/Benchmark_Driver
@@ -118,9 +118,14 @@ def instrument_test(driver_path, test, num_samples):
     return avg_test_output
 
 
-def get_tests(driver_path):
+def get_tests(driver_path, args):
     """Return a list of available performance tests"""
-    return subprocess.check_output([driver_path, '--list']).split()[2:]
+    tests = subprocess.check_output([driver_path, '--list']).split()[2:]
+    if args.filter:
+        return filter(lambda name: name.startswith(args.filter), tests)
+    if not args.benchmarks:
+        return tests
+    return sorted(list(set(tests).intersection(set(args.benchmarks))))
 
 
 def get_current_git_branch(git_repo_path):
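For reference, here is a minimal standalone sketch of the selection rules the new get_tests() applies, using a plain list of names and an argparse.Namespace in place of the driver's --list output and the parsed arguments; select_tests and the sample benchmark names are illustrative, not part of the driver:

from argparse import Namespace

def select_tests(tests, args):
    # Same three rules as the patched get_tests(), in the same order.
    if args.filter:
        # -f/--filter: keep every test whose name starts with the prefix.
        return [name for name in tests if name.startswith(args.filter)]
    if not args.benchmarks:
        # No explicit benchmark names: run the full list.
        return tests
    # Explicit names: keep only those the driver actually knows about.
    return sorted(set(tests).intersection(args.benchmarks))

names = ['Ackermann', 'Array2D', 'ArrayAppend', 'DictionarySwap']
print(select_tests(names, Namespace(filter='Array', benchmarks=[])))
# ['Array2D', 'ArrayAppend']
print(select_tests(names, Namespace(filter=None, benchmarks=['Ackermann', 'NoSuchTest'])))
# ['Ackermann']

The patch itself expresses the first rule with filter() and the last with list(set(...).intersection(set(...))); the comprehension and single set() above behave the same for this purpose and are only meant to be easier to read in isolation.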
@@ -168,9 +173,7 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
     line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
     if verbose and log_directory:
         print(line_format.format(*headings))
-    for test in get_tests(driver):
-        if benchmarks and test not in benchmarks:
-            continue
+    for test in benchmarks:
         test_output = instrument_test(driver, test, num_samples)
         if test_output[0] == 'Totals':
             continue
Expand Down Expand Up @@ -219,7 +222,7 @@ def submit(args):
file = os.path.join(args.tests, "Benchmark_" + optset)
try:
res = run_benchmarks(
file, benchmarks=args.benchmark,
file, benchmarks=get_tests(file, args),
num_samples=args.iterations)
data['Tests'].extend(parse_results(res, optset))
except subprocess.CalledProcessError as e:
@@ -243,7 +246,7 @@ def run(args):
     optset = args.optimization
     file = os.path.join(args.tests, "Benchmark_" + optset)
     run_benchmarks(
-        file, benchmarks=args.benchmarks,
+        file, benchmarks=get_tests(file, args),
         num_samples=args.iterations, verbose=True,
         log_directory=args.output_dir,
         swift_repo=args.swift_repo)
Expand Down Expand Up @@ -346,17 +349,40 @@ def positive_int(value):


def main():
parser = argparse.ArgumentParser(description='Swift benchmarks driver')
subparsers = parser.add_subparsers()

submit_parser = subparsers.add_parser(
'submit',
help='run benchmarks and submit results to LNT')
submit_parser.add_argument(
parser = argparse.ArgumentParser(
epilog='Example: ./Benchmark_Driver run -i 5 -f Array'
)
subparsers = parser.add_subparsers(
title='Swift benchmark driver commands',
help='See COMMAND -h for additional arguments', metavar='<command>')

parent_parser = argparse.ArgumentParser(add_help=False)
benchmarks_group = parent_parser.add_mutually_exclusive_group()
benchmarks_group.add_argument(
'benchmarks',
default=[],
help='benchmark to run (default: all)', nargs='*', metavar="BENCHMARK")
benchmarks_group.add_argument(
'-f', '--filter',
help='run all tests whose name starts with PREFIX', metavar="PREFIX")
parent_parser.add_argument(
'-t', '--tests',
help='directory containing Benchmark_O{,none,unchecked} ' +
'(default: DRIVER_DIR)',
default=DRIVER_DIR)

submit_parser = subparsers.add_parser(
'submit',
help='Run benchmarks and submit results to LNT',
parents=[parent_parser])
submit_parser.add_argument(
'-o', '--optimization', nargs='+',
help='optimization levels to use (default: O Onone Ounchecked)',
default=['O', 'Onone', 'Ounchecked'])
submit_parser.add_argument(
'-i', '--iterations',
help='number of times to run each test (default: 10)',
type=positive_int, default=10)
submit_parser.add_argument(
'-m', '--machine', required=True,
help='LNT machine name')
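The reworked main() above leans on two standard argparse features: a parent parser passed via parents=[...] so the shared BENCHMARK / -f/--filter / -t/--tests arguments appear on every subcommand, and a mutually exclusive group so explicit benchmark names and a -f prefix cannot be combined. A self-contained sketch of that pattern, with illustrative names rather than the driver's own:

import argparse

parent = argparse.ArgumentParser(add_help=False)
group = parent.add_mutually_exclusive_group()
# A positional may join a mutually exclusive group only if it is optional;
# nargs='*' plus a non-None default also keeps an omitted positional from
# counting as 'given' and spuriously conflicting with -f.
group.add_argument('benchmarks', nargs='*', default=[], metavar='BENCHMARK',
                   help='benchmark to run (default: all)')
group.add_argument('-f', '--filter', metavar='PREFIX',
                   help='run all tests whose name starts with PREFIX')

parser = argparse.ArgumentParser(prog='driver')
subparsers = parser.add_subparsers(metavar='<command>')
subparsers.add_parser('run', parents=[parent])     # 'run' inherits the group
subparsers.add_parser('submit', parents=[parent])  # and so does 'submit'

print(parser.parse_args(['run', '-f', 'Array']))
# Namespace(benchmarks=[], filter='Array')
print(parser.parse_args(['run', 'Ackermann', 'Array2D']))
# Namespace(benchmarks=['Ackermann', 'Array2D'], filter=None)

Passing both at once (e.g. driver run Ackermann -f Array) should be rejected with a 'not allowed with argument' error, which is what lets get_tests() treat args.filter and args.benchmarks as mutually exclusive alternatives.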
@@ -366,48 +392,33 @@ def main():
     submit_parser.add_argument(
         '-l', '--lnt_host', required=True,
         help='LNT host to submit results to')
-    submit_parser.add_argument(
-        '-i', '--iterations',
-        help='number of times to run each test (default: 10)',
-        type=positive_int, default=10)
-    submit_parser.add_argument(
-        '-o', '--optimization', nargs='+',
-        help='optimization levels to use (default: O Onone Ounchecked)',
-        default=['O', 'Onone', 'Ounchecked'])
-    submit_parser.add_argument(
-        'benchmark',
-        help='benchmark to run (default: all)', nargs='*')
     submit_parser.set_defaults(func=submit)
 
     run_parser = subparsers.add_parser(
         'run',
-        help='run benchmarks and output results to stdout')
+        help='Run benchmarks and output results to stdout',
+        parents=[parent_parser])
     run_parser.add_argument(
-        '-t', '--tests',
-        help='directory containing Benchmark_O{,none,unchecked} ' +
-        '(default: DRIVER_DIR)',
-        default=DRIVER_DIR)
+        '-o', '--optimization',
+        metavar='OPT',
+        choices=['O', 'Onone', 'Ounchecked'],
+        help='optimization level to use: {O,Onone,Ounchecked}, (default: O)',
+        default='O')
     run_parser.add_argument(
         '-i', '--iterations',
         help='number of times to run each test (default: 1)',
         type=positive_int, default=1)
-    run_parser.add_argument(
-        '-o', '--optimization',
-        help='optimization level to use (default: O)', default='O')
     run_parser.add_argument(
         '--output-dir',
         help='log results to directory (default: no logging)')
     run_parser.add_argument(
         '--swift-repo',
         help='absolute path to Swift source repo for branch comparison')
-    run_parser.add_argument(
-        'benchmarks',
-        help='benchmark to run (default: all)', nargs='*')
     run_parser.set_defaults(func=run)
 
     compare_parser = subparsers.add_parser(
         'compare',
-        help='compare benchmark results')
+        help='Compare benchmark results')
     compare_parser.add_argument(
         '--log-dir', required=True,
         help='directory containing benchmark logs')
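Taken together, the new interface supports invocations along these lines (the first is the epilog's own example; the behavior described follows from the argument definitions above, so treat it as a summary rather than verified output): ./Benchmark_Driver run -i 5 -f Array should list the tests reported by Benchmark_O (run's default -o optimization), keep only those whose names start with Array, and take 5 samples of each; ./Benchmark_Driver run Ackermann runs just that benchmark, and plain ./Benchmark_Driver run runs everything; the submit subcommand applies the same selection across the O, Onone and Ounchecked drivers (its default for -o) before submitting the results to LNT.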