From 3a5051e3929e7bd709e630e8b8a07b4eb34bf123 Mon Sep 17 00:00:00 2001
From: Pavol Vaskovic
Date: Sat, 15 Apr 2017 05:06:13 +0200
Subject: [PATCH 1/4] Fix SR-4598: Add option to run a subset of benchmarks
 matching a prefix

---
 benchmark/scripts/Benchmark_Driver | 87 +++++++++++++++++-------------
 1 file changed, 50 insertions(+), 37 deletions(-)

diff --git a/benchmark/scripts/Benchmark_Driver b/benchmark/scripts/Benchmark_Driver
index 22cb3e87ea25c..20ca0cfdf2362 100755
--- a/benchmark/scripts/Benchmark_Driver
+++ b/benchmark/scripts/Benchmark_Driver
@@ -118,9 +118,17 @@ def instrument_test(driver_path, test, num_samples):
     return avg_test_output
 
 
-def get_tests(driver_path):
+
+def get_tests(driver_path, args):
     """Return a list of available performance tests"""
-    return subprocess.check_output([driver_path, '--list']).split()[2:]
+    tests = subprocess.check_output([driver_path, '--list']).split()[2:]
+    if args.filter:
+        prefix = args.filter
+        tests = filter(lambda name: name.startswith(prefix), tests)
+    elif args.benchmark:
+        benchmarks = set(args.benchmark)
+        tests = sorted(list(set(tests).intersection(benchmarks)))
+    return tests
 
 
 def get_current_git_branch(git_repo_path):
@@ -168,9 +176,7 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
     line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
     if verbose and log_directory:
         print(line_format.format(*headings))
-    for test in get_tests(driver):
-        if benchmarks and test not in benchmarks:
-            continue
+    for test in benchmarks:
         test_output = instrument_test(driver, test, num_samples)
         if test_output[0] == 'Totals':
             continue
@@ -219,7 +225,7 @@ def submit(args):
         file = os.path.join(args.tests, "Benchmark_" + optset)
         try:
             res = run_benchmarks(
-                file, benchmarks=args.benchmark,
+                file, benchmarks=get_tests(file, args),
                 num_samples=args.iterations)
             data['Tests'].extend(parse_results(res, optset))
         except subprocess.CalledProcessError as e:
@@ -243,7 +249,7 @@ def run(args):
     optset = args.optimization
     file = os.path.join(args.tests, "Benchmark_" + optset)
     run_benchmarks(
-        file, benchmarks=args.benchmarks,
+        file, benchmarks=get_tests(file, args),
        num_samples=args.iterations, verbose=True,
        log_directory=args.output_dir,
        swift_repo=args.swift_repo)
@@ -346,17 +352,40 @@ def positive_int(value):
 
 
 def main():
-    parser = argparse.ArgumentParser(description='Swift benchmarks driver')
-    subparsers = parser.add_subparsers()
-
-    submit_parser = subparsers.add_parser(
-        'submit',
-        help='run benchmarks and submit results to LNT')
-    submit_parser.add_argument(
+    parser = argparse.ArgumentParser(
+        epilog='Example: ./Benchmark_Driver run -i 5 -f Array'
+    )
+    subparsers = parser.add_subparsers(
+        title='Swift benchmark driver commands',
+        help='See COMMAND -h for additional arguments', metavar='')
+
+    parent_parser = argparse.ArgumentParser(add_help=False)
+    benchmarks_group = parent_parser.add_mutually_exclusive_group()
+    benchmarks_group.add_argument(
+        'benchmark',
+        default=[],
+        help='benchmark to run (default: all)', nargs='*', metavar="BENCHMARK")
+    benchmarks_group.add_argument(
+        '-f', '--filter',
+        help='run all tests whose name starts with PREFIX', metavar="PREFIX")
+    parent_parser.add_argument(
         '-t', '--tests',
         help='directory containing Benchmark_O{,none,unchecked} ' +
              '(default: DRIVER_DIR)',
         default=DRIVER_DIR)
+
+    submit_parser = subparsers.add_parser(
+        'submit',
+        help='Run benchmarks and submit results to LNT',
+        parents=[parent_parser])
+    submit_parser.add_argument(
+        '-o', '--optimization', nargs='+', 
+        help='optimization levels to use (default: O Onone Ounchecked)', 
+        default=['O', 'Onone', 'Ounchecked']) 
+    submit_parser.add_argument(
+        '-i', '--iterations',
+        help='number of times to run each test (default: 10)',
+        type=positive_int, default=10)
     submit_parser.add_argument(
         '-m', '--machine', required=True,
         help='LNT machine name')
@@ -366,48 +395,32 @@ def main():
     submit_parser.add_argument(
         '-l', '--lnt_host', required=True,
         help='LNT host to submit results to')
-    submit_parser.add_argument(
-        '-i', '--iterations',
-        help='number of times to run each test (default: 10)',
-        type=positive_int, default=10)
-    submit_parser.add_argument(
-        '-o', '--optimization', nargs='+',
-        help='optimization levels to use (default: O Onone Ounchecked)',
-        default=['O', 'Onone', 'Ounchecked'])
-    submit_parser.add_argument(
-        'benchmark',
-        help='benchmark to run (default: all)', nargs='*')
     submit_parser.set_defaults(func=submit)
 
     run_parser = subparsers.add_parser(
         'run',
-        help='run benchmarks and output results to stdout')
+        help='Run benchmarks and output results to stdout',
+        parents=[parent_parser])
     run_parser.add_argument(
-        '-t', '--tests',
-        help='directory containing Benchmark_O{,none,unchecked} ' +
-             '(default: DRIVER_DIR)',
-        default=DRIVER_DIR)
+        '-o', '--optimization',
+        metavar='OPT',
+        choices=['O', 'Onone', 'Ounchecked'],
+        help='optimization level to use (default: O)', default='O')
     run_parser.add_argument(
         '-i', '--iterations',
         help='number of times to run each test (default: 1)',
         type=positive_int, default=1)
-    run_parser.add_argument(
-        '-o', '--optimization',
-        help='optimization level to use (default: O)', default='O')
     run_parser.add_argument(
         '--output-dir',
         help='log results to directory (default: no logging)')
     run_parser.add_argument(
         '--swift-repo',
         help='absolute path to Swift source repo for branch comparison')
-    run_parser.add_argument(
-        'benchmarks',
-        help='benchmark to run (default: all)', nargs='*')
     run_parser.set_defaults(func=run)
 
     compare_parser = subparsers.add_parser(
         'compare',
-        help='compare benchmark results')
+        help='Compare benchmark results')
     compare_parser.add_argument(
         '--log-dir', required=True,
         help='directory containing benchmark logs')
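The selection logic this patch gives get_tests(), shown in isolation: a runnable sketch with a stubbed test list standing in for the output of `Benchmark_O --list` (the benchmark names below are made up for illustration). The list comprehension here stands in for the patch's filter(...) call, which returns a list under the Python 2 this driver targets but a lazy iterator under Python 3.

# Hypothetical stand-in for the patched get_tests(); the stub list
# replaces the subprocess.check_output([driver_path, '--list']) call.
from argparse import Namespace


def select_tests(tests, args):
    # -f/--filter takes precedence: keep names matching the prefix.
    if args.filter:
        prefix = args.filter
        tests = [name for name in tests if name.startswith(prefix)]
    # Otherwise honor an explicit benchmark list, if one was given.
    elif args.benchmark:
        benchmarks = set(args.benchmark)
        tests = sorted(set(tests).intersection(benchmarks))
    return tests


stub = ['ArrayAppend', 'ArrayLiteral', 'Calculator', 'DictionarySwap']
print(select_tests(stub, Namespace(filter='Array', benchmark=[])))
# ['ArrayAppend', 'ArrayLiteral']
print(select_tests(stub, Namespace(filter=None, benchmark=['Calculator', 'Typo'])))
# ['Calculator'] -- misspelled names are silently dropped by the intersection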
From 094094a04cd206e5f2d08e99cb9449839ac7bcfb Mon Sep 17 00:00:00 2001
From: Pavol Vaskovic
Date: Mon, 24 Apr 2017 23:53:40 +0200
Subject: [PATCH 2/4] Addressed Python style issue raised during review

---
 benchmark/scripts/Benchmark_Driver | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/benchmark/scripts/Benchmark_Driver b/benchmark/scripts/Benchmark_Driver
index 20ca0cfdf2362..580e33429b9ac 100755
--- a/benchmark/scripts/Benchmark_Driver
+++ b/benchmark/scripts/Benchmark_Driver
@@ -123,12 +123,10 @@ def get_tests(driver_path, args):
     """Return a list of available performance tests"""
     tests = subprocess.check_output([driver_path, '--list']).split()[2:]
     if args.filter:
-        prefix = args.filter
-        tests = filter(lambda name: name.startswith(prefix), tests)
-    elif args.benchmark:
-        benchmarks = set(args.benchmark)
-        tests = sorted(list(set(tests).intersection(benchmarks)))
-    return tests
+        return filter(lambda name: name.startswith(args.filter), tests)
+    if not args.benchmark:
+        return tests
+    return sorted(list(set(tests).intersection(set(args.benchmark))))
 
 
 def get_current_git_branch(git_repo_path):
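Patch 1 wired the shared arguments through an argparse parent parser; patch 2 only touches get_tests(), but the parser pattern is worth seeing on its own. Below is a minimal, self-contained sketch using stock argparse: the subcommand and argument names mirror the patch at this point in the series, everything else is illustrative. A mutually exclusive group holds both the positional benchmark list and -f/--filter, and both subcommands inherit it via parents=[...].

# Sketch of the parent-parser pattern from the patched main().
import argparse

parent = argparse.ArgumentParser(add_help=False)
group = parent.add_mutually_exclusive_group()
# A positional is only allowed in a mutually exclusive group if it is
# optional; nargs='*' plus an explicit default makes it so.
group.add_argument('benchmark', default=[], nargs='*', metavar='BENCHMARK',
                   help='benchmark to run (default: all)')
group.add_argument('-f', '--filter', metavar='PREFIX',
                   help='run all tests whose name starts with PREFIX')

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
subparsers.add_parser('run', parents=[parent])
subparsers.add_parser('submit', parents=[parent])

print(parser.parse_args(['run', '-f', 'Array']))
# Namespace(benchmark=[], filter='Array')
print(parser.parse_args(['submit', 'Ackermann', 'Fibonacci']))
# Namespace(benchmark=['Ackermann', 'Fibonacci'], filter=None)
# 'run Ackermann -f Array' exits with an error like:
#   argument -f/--filter: not allowed with argument BENCHMARK

The default=[] on the positional is what keeps argparse from rejecting it with "mutually exclusive arguments must be optional", and it doubles as the empty sentinel get_tests() checks to decide whether to run everything.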
From 5c29fa65b11e1fe006749afa101b352756f33a52 Mon Sep 17 00:00:00 2001
From: Pavol Vaskovic
Date: Tue, 25 Apr 2017 03:25:29 +0200
Subject: [PATCH 3/4] Renamed args.benchmark to args.benchmarks (plural)

---
 benchmark/scripts/Benchmark_Driver | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/benchmark/scripts/Benchmark_Driver b/benchmark/scripts/Benchmark_Driver
index 580e33429b9ac..05341e243eae9 100755
--- a/benchmark/scripts/Benchmark_Driver
+++ b/benchmark/scripts/Benchmark_Driver
@@ -124,9 +124,9 @@ def get_tests(driver_path, args):
     tests = subprocess.check_output([driver_path, '--list']).split()[2:]
     if args.filter:
         return filter(lambda name: name.startswith(args.filter), tests)
-    if not args.benchmark:
+    if not args.benchmarks:
         return tests
-    return sorted(list(set(tests).intersection(set(args.benchmark))))
+    return sorted(list(set(tests).intersection(set(args.benchmarks))))
 
 
 def get_current_git_branch(git_repo_path):
@@ -360,7 +360,7 @@ def main():
     parent_parser = argparse.ArgumentParser(add_help=False)
     benchmarks_group = parent_parser.add_mutually_exclusive_group()
     benchmarks_group.add_argument(
-        'benchmark',
+        'benchmarks',
         default=[],
         help='benchmark to run (default: all)', nargs='*', metavar="BENCHMARK")
     benchmarks_group.add_argument(

From cecfe439c6b96568f1b84a557af6cb2c28cf227c Mon Sep 17 00:00:00 2001
From: Pavol Vaskovic
Date: Tue, 25 Apr 2017 03:50:40 +0200
Subject: [PATCH 4/4] Display OPT choices in usage help

Fixed python-lint warnings
---
 benchmark/scripts/Benchmark_Driver | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/benchmark/scripts/Benchmark_Driver b/benchmark/scripts/Benchmark_Driver
index 05341e243eae9..637e02fc425bf 100755
--- a/benchmark/scripts/Benchmark_Driver
+++ b/benchmark/scripts/Benchmark_Driver
@@ -118,7 +118,6 @@ def instrument_test(driver_path, test, num_samples):
     return avg_test_output
 
 
-
 def get_tests(driver_path, args):
     """Return a list of available performance tests"""
     tests = subprocess.check_output([driver_path, '--list']).split()[2:]
@@ -377,9 +376,9 @@ def main():
         help='Run benchmarks and submit results to LNT',
         parents=[parent_parser])
     submit_parser.add_argument(
-        '-o', '--optimization', nargs='+', 
-        help='optimization levels to use (default: O Onone Ounchecked)', 
-        default=['O', 'Onone', 'Ounchecked']) 
+        '-o', '--optimization', nargs='+',
+        help='optimization levels to use (default: O Onone Ounchecked)',
+        default=['O', 'Onone', 'Ounchecked'])
     submit_parser.add_argument(
         '-i', '--iterations',
         help='number of times to run each test (default: 10)',
@@ -403,7 +402,8 @@ def main():
         '-o', '--optimization',
         metavar='OPT',
         choices=['O', 'Onone', 'Ounchecked'],
-        help='optimization level to use (default: O)', default='O')
+        help='optimization level to use: {O,Onone,Ounchecked} (default: O)',
+        default='O')
     run_parser.add_argument(
         '-i', '--iterations',
         help='number of times to run each test (default: 1)',
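The display quirk behind patch 4's final hunk, shown in isolation: giving an option a metavar suppresses the {O,Onone,Ounchecked} choice list that argparse would otherwise render in the usage and help lines, which is why the patch repeats the choices by hand inside the help string. A small sketch using stock argparse (the prog name is arbitrary; exact help layout varies by Python version):

# metavar wins over choices in the rendered help, but choices are
# still enforced at parse time.
import argparse

parser = argparse.ArgumentParser(prog='Benchmark_Driver run')
parser.add_argument(
    '-o', '--optimization',
    metavar='OPT',                          # usage shows '-o OPT', not the list
    choices=['O', 'Onone', 'Ounchecked'],   # validation is unaffected
    help='optimization level to use: {O,Onone,Ounchecked} (default: O)',
    default='O')

parser.print_help()
# usage: Benchmark_Driver run [-h] [-o OPT]
#   -o OPT, --optimization OPT
#                         optimization level to use: {O,Onone,Ounchecked}
#                         (default: O)

print(parser.parse_args(['-o', 'Onone']).optimization)  # Onone
# parser.parse_args(['-o', 'O2']) exits with: invalid choice: 'O2'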