2 changes: 1 addition & 1 deletion benchmark/scripts/Benchmark_DTrace.in
@@ -74,7 +74,7 @@ class DTraceBenchmarkDriver(perf_test_driver.BenchmarkDriver):

def process_input(self, data):
test_name = '({}_{})'.format(data['opt'], data['test_name'])
print "Running {}...".format(test_name)
print("Running {}...".format(test_name))
sys.stdout.flush()

def get_results_with_iters(iters):
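The change above is the same mechanical conversion applied throughout this pull request: Python 2 print statements become print() calls. Single-argument calls such as the one in process_input behave identically under both interpreters, because Python 2 simply sees a parenthesized expression after the print keyword. The multi-argument calls that appear later (in Benchmark_Driver and compare_perf_tests.py) print a tuple on Python 2 unless the print_function future import is in effect. A minimal sketch of the difference, not part of the diff, with made-up values:

# Sketch only: the __future__ import is a no-op on Python 3 and turns print
# into a function on Python 2; it must be the first statement in the module.
from __future__ import print_function

test_name = '(O_Ackermann)'  # hypothetical value for illustration

# Single argument: identical output on Python 2 and 3 even without the import.
print("Running {}...".format(test_name))

# Multiple arguments: without the import, Python 2 would print the tuple
# ('Error:\t', 'timeout'); with it, both interpreters print "Error:   timeout".
print("Error:\t", "timeout")
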
58 changes: 29 additions & 29 deletions benchmark/scripts/Benchmark_Driver
@@ -63,16 +63,16 @@ def parse_results(res, optset):
return tests

def submit_to_lnt(data, url):
print "\nSubmitting results to LNT server..."
print("\nSubmitting results to LNT server...")
json_report = {'input_data': json.dumps(data), 'commit': '1'}
data = urllib.urlencode(json_report)
response_str = urllib2.urlopen(urllib2.Request(url, data))
response = json.loads(response_str.read())
if 'success' in response:
print "Server response:\tSuccess"
print("Server response:\tSuccess")
else:
print "Server response:\tError"
print "Error:\t", response['error']
print("Server response:\tError")
print("Error:\t", response['error'])
sys.exit(1)

def instrument_test(driver_path, test, num_samples):
@@ -142,7 +142,7 @@ def log_results(log_directory, driver, formatted_output, swift_repo=None):
pass
log_file = os.path.join(output_directory,
driver_name + '-' + timestamp + '.log')
print 'Logging results to: %s' % log_file
print('Logging results to: %s' % log_file)
with open(log_file, 'w') as f:
f.write(formatted_output)

@@ -158,7 +158,7 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
'SD(μs)', 'MEDIAN(μs)', 'MAX_RSS(B)']
line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
if verbose and log_directory:
print line_format.format(*headings)
print(line_format.format(*headings))
for test in get_tests(driver):
if benchmarks and test not in benchmarks:
continue
@@ -167,9 +167,9 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
continue
if verbose:
if log_directory:
print line_format.format(*test_output)
print(line_format.format(*test_output))
else:
print ','.join(test_output)
print(','.join(test_output))
output.append(test_output)
(samples, _min, _max, mean) = map(int, test_output[2:6])
total_tests += 1
@@ -184,36 +184,36 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
totals_output = '\n\n' + ','.join(totals)
if verbose:
if log_directory:
print line_format.format(*([''] + totals))
print(line_format.format(*([''] + totals)))
else:
print totals_output[1:]
print(totals_output[1:])
formatted_output += totals_output
if log_directory:
log_results(log_directory, driver, formatted_output, swift_repo)
return formatted_output

def submit(args):
print "SVN revision:\t", args.revision
print "Machine name:\t", args.machine
print "Iterations:\t", args.iterations
print "Optimizations:\t", ','.join(args.optimization)
print "LNT host:\t", args.lnt_host
print("SVN revision:\t", args.revision)
print("Machine name:\t", args.machine)
print("Iterations:\t", args.iterations)
print("Optimizations:\t", ','.join(args.optimization))
print("LNT host:\t", args.lnt_host)
starttime = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
print "Start time:\t", starttime
print("Start time:\t", starttime)
data = {}
data['Tests'] = []
data['Machine'] = {'Info': {'name': args.machine}, 'Name': args.machine}
print "\nRunning benchmarks..."
print("\nRunning benchmarks...")
for optset in args.optimization:
print "Opt level:\t", optset
print("Opt level:\t", optset)
file = os.path.join(args.tests, "Benchmark_" + optset)
try:
res = run_benchmarks(file, benchmarks=args.benchmark,
num_samples=args.iterations)
data['Tests'].extend(parse_results(res, optset))
except subprocess.CalledProcessError as e:
print "Execution failed.. Test results are empty."
print "Process output:\n", e.output
print("Execution failed.. Test results are empty.")
print("Process output:\n", e.output)

endtime = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
data['Run'] = {'End Time': endtime,
@@ -222,7 +222,7 @@ def submit(args):
'tag': 'nts',
'test_suite_revision': 'None'},
'Start Time': starttime}
print "End time:\t", endtime
print("End time:\t", endtime)

submit_to_lnt(data, args.lnt_host)
return 0
@@ -242,7 +242,7 @@ def format_name(log_path):

def compare_logs(compare_script, new_log, old_log):
"""Return diff of log files at paths `new_log` and `old_log`"""
print 'Comparing %s %s ...' % (format_name(old_log), format_name(new_log))
print('Comparing %s %s ...' % (format_name(old_log), format_name(new_log)))
subprocess.call([compare_script, old_log, new_log])

def compare(args):
@@ -254,9 +254,9 @@ def compare(args):
master_branch_dir = os.path.join(log_dir, 'master')

if current_branch != 'master' and not os.path.isdir(master_branch_dir):
print 'Unable to find benchmark logs for master branch. Set a ' + \
'baseline benchmark log by passing --benchmark to ' + \
'build-script while on master branch.'
print('Unable to find benchmark logs for master branch. Set a ' +
'baseline benchmark log by passing --benchmark to ' +
'build-script while on master branch.')
return 1

recent_logs = {}
@@ -276,17 +276,17 @@ def compare(args):
recent_logs['master_Onone'][0],
recent_logs['master_Onone'][1])
else:
print 'master/master comparison skipped: no previous master logs'
print('master/master comparison skipped: no previous master logs')
else:
# TODO: Check for outdated master branch log
if len(recent_logs[current_branch + '_O']) == 0 or \
len(recent_logs[current_branch + '_Onone']) == 0:
print 'branch sanity failure: missing branch logs'
print('branch sanity failure: missing branch logs')
return 1

if len(recent_logs[current_branch + '_O']) == 1 or \
len(recent_logs[current_branch + '_Onone']) == 1:
print 'branch/branch comparison skipped: no previous branch logs'
print('branch/branch comparison skipped: no previous branch logs')
else:
compare_logs(compare_script,
recent_logs[current_branch + '_O'][0],
@@ -297,7 +297,7 @@ def compare(args):

if len(recent_logs['master_O']) == 0 or \
len(recent_logs['master_Onone']) == 0:
print 'branch/master failure: no master logs'
print('branch/master failure: no master logs')
return 1
else:
compare_logs(compare_script,
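The print calls in submit_to_lnt above now use Python 3 syntax, but the neighbouring urllib.urlencode, urllib2.Request, and urllib2.urlopen calls (unchanged by this diff) exist only on Python 2; Python 3 moved them to urllib.parse and urllib.request. A hedged sketch of one common compatibility shim, offered as an assumption rather than something this PR does:

# Sketch only: keep submit_to_lnt importable on both interpreters.
try:                                     # Python 3
    from urllib.parse import urlencode
    from urllib.request import urlopen, Request
except ImportError:                      # Python 2
    from urllib import urlencode
    from urllib2 import urlopen, Request

import json

def submit_to_lnt(data, url):
    print("\nSubmitting results to LNT server...")
    json_report = {'input_data': json.dumps(data), 'commit': '1'}
    body = urlencode(json_report).encode('utf-8')  # urlopen wants bytes on Python 3
    response = json.loads(urlopen(Request(url, body)).read().decode('utf-8'))
    return response
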
2 changes: 1 addition & 1 deletion benchmark/scripts/Benchmark_GuardMalloc.in
@@ -41,7 +41,7 @@ class GuardMallocBenchmarkDriver(perf_test_driver.BenchmarkDriver):

def process_input(self, data):
test_name = '({},{})'.format(data['opt'], data['test_name'])
print "Running {}...".format(test_name)
print("Running {}...".format(test_name))
sys.stdout.flush()
status = subprocess.call([data['path'], data['test_name'], '--num-iters=2'],
env=data['env'], stderr=open('/dev/null', 'w'),
4 changes: 2 additions & 2 deletions benchmark/scripts/Benchmark_RuntimeLeaksRunner.in
@@ -69,7 +69,7 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):

def process_input(self, data):
test_name = '({},{})'.format(data['opt'], data['test_name'])
print "Running {}...".format(test_name)
print("Running {}...".format(test_name))
sys.stdout.flush()
try:
p = subprocess.Popen([data['path'], "--run-all", "--num-samples=2",
@@ -95,7 +95,7 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):

return LeaksRunnerResult(test_name, (d['objc_count'] + d['swift_count']) > 0)
except (KeyError, ValueError):
print "Failed parse output! (%s,%s)" % (data['path'], data['test_name'])
print("Failed parse output! (%s,%s)" % (data['path'], data['test_name']))
return LeaksRunnerResult(test_name, True)

SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))
54 changes: 27 additions & 27 deletions benchmark/scripts/compare_perf_tests.py
@@ -50,7 +50,7 @@ def get_scores(fname):
try:
for line in f:
if VERBOSE:
print "Parsing", line,
print("Parsing", line,)
m = SCORERE.match(line)
is_total = False
if not m:
@@ -60,7 +60,7 @@ def get_scores(fname):
continue

if VERBOSE:
print " match", m.group(KEYGROUP), m.group(BESTGROUP)
print(" match", m.group(KEYGROUP), m.group(BESTGROUP))

if not m.group(KEYGROUP) in scores:
scores[m.group(KEYGROUP)] = []
@@ -81,8 +81,8 @@ def is_max_score(newscore, maxscore, invert):
return not maxscore or (newscore > maxscore if not invert else newscore < maxscore)

def compare_scores(key, score1, worstsample1, score2, worstsample2, runs, num):
print num.rjust(3),
print key.ljust(25),
print(num.rjust(3),)
print(key.ljust(25),)
bestscore1 = None
bestscore2 = None
worstscore1 = None
@@ -130,32 +130,32 @@ def compare_scores(key, score1, worstsample1, score2, worstsample2, runs, num):
Num, Den = Den, Num
print ("%.2fx" % (Num / Den)).rjust(9),
else:
print "*".rjust(9),
print("*".rjust(9),)
if ShowSpeedup:
print "*".rjust(9),
print("*".rjust(9),)
# check if the worst->best interval for each configuration overlap.
if minbest:
if (bestscore1 < bestscore2 and bestscore2 < worstscore1) \
or (bestscore2 < bestscore1 and bestscore1 < worstscore2):
print "(?)",
print("(?)",)
else:
if (worstscore1 < worstscore2 and worstscore2 < bestscore1) \
or (worstscore2 < worstscore1 and worstscore1 < bestscore2):
print "(?)",
print
print("(?)",)
print()

def print_best_scores(key, scores):
print key,
print(key,)
bestscore = None
minbest = IsTime
for score in scores:
if is_max_score(newscore=score, maxscore=bestscore, invert=minbest):
bestscore = score
print ", %d" % bestscore
print(", %d" % bestscore)

def usage():
print "repeat.sh <n> Benchmark_O[none|unchecked] > file.times"
print "compare_perf_tests.py <file.times> [<file2.times>]"
print("repeat.sh <n> Benchmark_O[none|unchecked] > file.times")
print("compare_perf_tests.py <file.times> [<file2.times>]")

if __name__ == '__main__':
if len(sys.argv) < 2:
@@ -182,35 +182,35 @@ def usage():
runs = runs2

if VERBOSE:
print scores1
print scores2
print(scores1)
print(scores2)

keys = list(set(scores1.keys() + scores2.keys()))
keys.sort()
if VERBOSE:
print "comparing ", file1, "vs", file2, "=",
print("comparing ", file1, "vs", file2, "=",)
if IsTime:
print file1, "/", file2
print(file1, "/", file2)
else:
print file2, "/", file1
print(file2, "/", file1)

print "#".rjust(3),
print "TEST".ljust(25),
print("#".rjust(3),)
print("TEST".ljust(25),)
if PrintAllScores:
for i in range(0, runs):
print ("OLD_RUN%d" % i).rjust(9),
print(("OLD_RUN%d" % i).rjust(9),)
for i in range(0, runs):
print ("NEW_RUN%d" % i).rjust(9),
print(("NEW_RUN%d" % i).rjust(9),)
else:
print "BEST_OLD_MIN(μs)".rjust(17),
print "BEST_NEW_MIN(μs)".rjust(17),
print 'DELTA'.rjust(9), '%DELTA'.rjust(9), 'SPEEDUP'.rjust(9)
print("BEST_OLD_MIN(μs)".rjust(17),)
print("BEST_NEW_MIN(μs)".rjust(17),)
print('DELTA'.rjust(9), '%DELTA'.rjust(9), 'SPEEDUP'.rjust(9))

for key in keys:
if key not in scores1:
print key, "not in", file1
print(key, "not in", file1)
continue
if key not in scores2:
print key, "not in", file2
print(key, "not in", file2)
continue
compare_scores(key, scores1[key], worstscores1[key], scores2[key], worstscores2[key], runs, nums[key])
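
One behavioural note on the compare_scores and table-heading changes above: in Python 2, a trailing comma on a print statement (print num.rjust(3),) suppressed the newline so the report's columns stayed on one row. After the mechanical conversion, print(num.rjust(3),) ignores the trailing comma on Python 3 and emits a newline per call, while on Python 2 it prints a one-element tuple, so the column layout is lost either way. A hedged sketch of how a full Python 3 port would preserve it, with made-up values; this is not what the PR does:

# Sketch only: requires Python 3 (or the print_function __future__ import),
# because end= is a keyword argument of the print function.
num, key = '12', 'Ackermann'    # hypothetical values
print(num.rjust(3), end=' ')    # Python 2 spelling was:  print num.rjust(3),
print(key.ljust(25), end=' ')
print()                         # terminate the row explicitly
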
2 changes: 1 addition & 1 deletion benchmark/scripts/generate_harness/generate_harness.py
@@ -82,7 +82,7 @@ def find_run_funcs(dirs):
for template_file in template_map:
template_path = os.path.join(script_dir, template_file)
template = template_env.get_template(template_path)
print template_map[template_file]
print(template_map[template_file])
open(template_map[template_file], 'w').write(
template.render(tests=tests,
multisource_benches=multisource_benches,
2 changes: 1 addition & 1 deletion utils/rth
@@ -21,7 +21,7 @@ VERBOSE = True

def verbose_print_command(command):
if VERBOSE:
print " ".join(pipes.quote(c) for c in command)
print(" ".join(pipes.quote(c) for c in command))
sys.stdout.flush()

class ResilienceTest(object):
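verbose_print_command in utils/rth still goes through pipes.quote, which works on Python 2 and on older Python 3 releases but is not the long-term spelling: Python 3 exposes the same helper as shlex.quote, and the pipes module has since been removed from newer Python releases. A small, version-tolerant sketch, offered as an assumption rather than a change this PR makes:

# Sketch only: prefer shlex.quote where available, fall back to pipes.quote.
try:
    from shlex import quote     # Python 3.3+
except ImportError:
    from pipes import quote     # Python 2
import sys

VERBOSE = True

def verbose_print_command(command):
    if VERBOSE:
        print(" ".join(quote(c) for c in command))
        sys.stdout.flush()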