153 changes: 72 additions & 81 deletions benchmark/scripts/Benchmark_Driver
@@ -81,46 +81,25 @@ def submit_to_lnt(data, url):

def instrument_test(driver_path, test, num_samples):
"""Run a test and instrument its peak memory use"""
test_outputs = []
for _ in range(num_samples):
test_output_raw = subprocess.check_output(
['time', '-lp', driver_path, test],
stderr=subprocess.STDOUT
)
peak_memory = re.match('\s*(\d+)\s*maximum resident set size',
test_output_raw.split('\n')[-15]).group(1)
test_outputs.append(test_output_raw.split()[1].split(',') +
[peak_memory])

# Average sample results
num_samples_index = 2
min_index = 3
max_index = 4
avg_start_index = 5

# TODO: Correctly take stdev
avg_test_output = test_outputs[0]
avg_test_output[avg_start_index:] = map(int,
avg_test_output[avg_start_index:])
for test_output in test_outputs[1:]:
for i in range(avg_start_index, len(test_output)):
avg_test_output[i] += int(test_output[i])
for i in range(avg_start_index, len(avg_test_output)):
avg_test_output[i] = int(round(avg_test_output[i] /
float(len(test_outputs))))
avg_test_output[num_samples_index] = num_samples
avg_test_output[min_index] = min(
test_outputs, key=lambda x: int(x[min_index]))[min_index]
avg_test_output[max_index] = max(
test_outputs, key=lambda x: int(x[max_index]))[max_index]
avg_test_output = map(str, avg_test_output)

return avg_test_output


def get_tests(driver_path):
test_output_raw = subprocess.check_output(
['time', '-lp', driver_path, test,
'--num-samples={}'.format(num_samples)],
stderr=subprocess.STDOUT
)
peak_memory = re.match('\s*(\d+)\s*maximum resident set size',
test_output_raw.split('\n')[-15]).group(1)
test_output = (test_output_raw.split()[1].split(',') + [peak_memory])
return test_output


def get_tests(driver_path, args):
"""Return a list of available performance tests"""
return subprocess.check_output([driver_path, '--list']).split()[2:]
tests = subprocess.check_output([driver_path, '--list']).split()[2:]
if args.filter:
Contributor review comment:
Can you use early returns here to eliminate the elif? You do not have any code of consequence after the condition, and early returns communicate that to the reader more succinctly, i.e.:

if args.filter:
    prefix = args.filter
    return filter(lambda name: name.startswith(prefix), tests)

if not args.benchmark:
    return tests

benchmarks = set(args.benchmark)
return sorted(list(set(tests).intersection(benchmarks)))

Notice how, just by reading the code, the reader can tell how much work remains before each return without having to read the whole function.

return filter(lambda name: name.startswith(args.filter), tests)
if not args.benchmark:
return tests
return sorted(list(set(tests).intersection(set(args.benchmark))))
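
For readability, since the review thread above interrupts the new get_tests, here is the full function as it reads after this change (assembled from the added lines in this hunk):

    def get_tests(driver_path, args):
        """Return a list of available performance tests"""
        tests = subprocess.check_output([driver_path, '--list']).split()[2:]
        if args.filter:
            return filter(lambda name: name.startswith(args.filter), tests)
        if not args.benchmark:
            return tests
        return sorted(list(set(tests).intersection(set(args.benchmark))))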


def get_current_git_branch(git_repo_path):
@@ -129,6 +108,12 @@ def get_current_git_branch(git_repo_path):
['git', '-C', git_repo_path, 'rev-parse',
'--abbrev-ref', 'HEAD'], stderr=subprocess.STDOUT).strip()

def get_git_head_ID(git_repo_path):
"""Return the short identifier for the HEAD commit of the repo
`git_repo_path`"""
return subprocess.check_output(
['git', '-C', git_repo_path, 'rev-parse',
'--short', 'HEAD'], stderr=subprocess.STDOUT).strip()

def log_results(log_directory, driver, formatted_output, swift_repo=None):
"""Log `formatted_output` to a branch specific directory in
@@ -138,6 +123,10 @@ def log_results(log_directory, driver, formatted_output, swift_repo=None):
branch = get_current_git_branch(swift_repo)
except (OSError, subprocess.CalledProcessError):
branch = None
try:
head_ID = '-' + get_git_head_ID(swift_repo)
except (OSError, subprocess.CalledProcessError):
head_ID = ''
timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
if branch:
output_directory = os.path.join(log_directory, branch)
@@ -149,7 +138,7 @@
except OSError:
pass
log_file = os.path.join(output_directory,
driver_name + '-' + timestamp + '.log')
driver_name + '-' + timestamp + head_ID + '.log')
print('Logging results to: %s' % log_file)
with open(log_file, 'w') as f:
f.write(formatted_output)
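
As an illustration, with this naming scheme a log written for the Benchmark_O driver on the master branch would land at a path like the following (the timestamp and short commit hash are hypothetical):

    <output_dir>/master/Benchmark_O-20170523143000-1a2b3c4.log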
@@ -168,9 +157,8 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
if verbose and log_directory:
print(line_format.format(*headings))
for test in get_tests(driver):
if benchmarks and test not in benchmarks:
continue
totals = tuple([0] * 7)
for test in benchmarks:
test_output = instrument_test(driver, test, num_samples)
if test_output[0] == 'Totals':
continue
@@ -180,16 +168,12 @@
else:
print(','.join(test_output))
output.append(test_output)
(samples, _min, _max, mean) = map(int, test_output[2:6])
totals = map(sum, zip(totals, map(int, test_output[3:])))
total_tests += 1
total_min += _min
total_max += _max
total_mean += mean
if not output:
return
formatted_output = '\n'.join([','.join(l) for l in output])
totals = map(str, ['Totals', total_tests, total_min, total_max,
total_mean, '0', '0', '0'])
totals = map(str, ['Totals', total_tests] + totals)
totals_output = '\n\n' + ','.join(totals)
if verbose:
if log_directory:
@@ -219,7 +203,7 @@ def submit(args):
file = os.path.join(args.tests, "Benchmark_" + optset)
try:
res = run_benchmarks(
file, benchmarks=args.benchmark,
file, benchmarks=get_tests(file, args),
num_samples=args.iterations)
data['Tests'].extend(parse_results(res, optset))
except subprocess.CalledProcessError as e:
@@ -243,7 +227,7 @@ def run(args):
optset = args.optimization
file = os.path.join(args.tests, "Benchmark_" + optset)
run_benchmarks(
file, benchmarks=args.benchmarks,
file, benchmarks=get_tests(file, args),
num_samples=args.iterations, verbose=True,
log_directory=args.output_dir,
swift_repo=args.swift_repo)
@@ -346,17 +330,40 @@ def positive_int(value):


def main():
parser = argparse.ArgumentParser(description='Swift benchmarks driver')
subparsers = parser.add_subparsers()

submit_parser = subparsers.add_parser(
'submit',
help='run benchmarks and submit results to LNT')
submit_parser.add_argument(
parser = argparse.ArgumentParser(
epilog='Example: ./Benchmark_Driver run -i 5 -f Array'
)
subparsers = parser.add_subparsers(
title='Swift benchmark driver commands',
help='See COMMAND -h for additional arguments', metavar='<command>')

parent_parser = argparse.ArgumentParser(add_help=False)
benchmarks_group = parent_parser.add_mutually_exclusive_group()
benchmarks_group.add_argument(
'benchmark',
default=[],
help='benchmark to run (default: all)', nargs='*', metavar="BENCHMARK")
benchmarks_group.add_argument(
'-f', '--filter',
help='run all tests whose name starts with PREFIX', metavar="PREFIX")
parent_parser.add_argument(
'-t', '--tests',
help='directory containing Benchmark_O{,none,unchecked} ' +
'(default: DRIVER_DIR)',
default=DRIVER_DIR)

submit_parser = subparsers.add_parser(
'submit',
help='Run benchmarks and submit results to LNT',
parents=[parent_parser])
submit_parser.add_argument(
'-o', '--optimization', nargs='+',
help='optimization levels to use (default: O Onone Ounchecked)',
default=['O', 'Onone', 'Ounchecked'])
submit_parser.add_argument(
'-i', '--iterations',
help='number of times to run each test (default: 10)',
type=positive_int, default=10)
submit_parser.add_argument(
'-m', '--machine', required=True,
help='LNT machine name')
@@ -366,48 +373,32 @@ def main():
submit_parser.add_argument(
'-l', '--lnt_host', required=True,
help='LNT host to submit results to')
submit_parser.add_argument(
'-i', '--iterations',
help='number of times to run each test (default: 10)',
type=positive_int, default=10)
submit_parser.add_argument(
'-o', '--optimization', nargs='+',
help='optimization levels to use (default: O Onone Ounchecked)',
default=['O', 'Onone', 'Ounchecked'])
submit_parser.add_argument(
'benchmark',
help='benchmark to run (default: all)', nargs='*')
submit_parser.set_defaults(func=submit)

run_parser = subparsers.add_parser(
'run',
help='run benchmarks and output results to stdout')
help='Run benchmarks and output results to stdout',
parents=[parent_parser])
run_parser.add_argument(
'-t', '--tests',
help='directory containing Benchmark_O{,none,unchecked} ' +
'(default: DRIVER_DIR)',
default=DRIVER_DIR)
'-o', '--optimization',
metavar='OPT',
choices=['O', 'Onone', 'Ounchecked'],
help='optimization level to use (default: O)', default='O')
run_parser.add_argument(
'-i', '--iterations',
help='number of times to run each test (default: 1)',
type=positive_int, default=1)
run_parser.add_argument(
'-o', '--optimization',
help='optimization level to use (default: O)', default='O')
run_parser.add_argument(
'--output-dir',
help='log results to directory (default: no logging)')
run_parser.add_argument(
'--swift-repo',
help='absolute path to Swift source repo for branch comparison')
run_parser.add_argument(
'benchmarks',
help='benchmark to run (default: all)', nargs='*')
run_parser.set_defaults(func=run)
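
A couple of hypothetical invocations of the restructured run command, based on the options defined above (benchmark names and paths are illustrative):

    ./Benchmark_Driver run -i 3 -f Array --output-dir logs --swift-repo ~/swift
    ./Benchmark_Driver run -o Onone Ackermann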

compare_parser = subparsers.add_parser(
'compare',
help='compare benchmark results')
help='Compare benchmark results')
compare_parser.add_argument(
'--log-dir', required=True,
help='directory containing benchmark logs')