From bac7c4533b0dcc542c9fb73c790a5d00acf36ec3 Mon Sep 17 00:00:00 2001 From: Valdis Date: Thu, 16 Aug 2018 10:21:53 +0300 Subject: [PATCH 01/39] ignore intellij files --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 4138717..c37ddf5 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ bin/.coursier bin/.scalafmt* results/ *.iprof +.idea \ No newline at end of file From 9ef2babe320f9bd439299175ff903f34766808df Mon Sep 17 00:00:00 2001 From: Valdis Date: Thu, 16 Aug 2018 11:03:18 +0300 Subject: [PATCH 02/39] give 2 GB heap for compiling --- scripts/run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run.py b/scripts/run.py index 73cf91b..1b0a265 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -35,7 +35,7 @@ def run(cmd): return subp.check_output(cmd) def compile(bench, compilecmd): - cmd = [sbt, 'clean'] + cmd = [sbt, '-J-Xmx2G', 'clean'] cmd.append('set mainClass in Compile := Some("{}")'.format(bench)) cmd.append(compilecmd) return run(cmd) From 182b5978a53b662be078091cf2df5b3797b90b84 Mon Sep 17 00:00:00 2001 From: Valdis Date: Fri, 14 Sep 2018 16:35:55 +0200 Subject: [PATCH 03/39] latest snapshot --- confs/scala-native-0.3.9-SNAPSHOT/build.sbt | 5 +++++ confs/scala-native-0.3.9-SNAPSHOT/compile | 1 + confs/scala-native-0.3.9-SNAPSHOT/plugins.sbt | 1 + confs/scala-native-0.3.9-SNAPSHOT/run | 1 + 4 files changed, 8 insertions(+) create mode 100644 confs/scala-native-0.3.9-SNAPSHOT/build.sbt create mode 100644 confs/scala-native-0.3.9-SNAPSHOT/compile create mode 100644 confs/scala-native-0.3.9-SNAPSHOT/plugins.sbt create mode 100644 confs/scala-native-0.3.9-SNAPSHOT/run diff --git a/confs/scala-native-0.3.9-SNAPSHOT/build.sbt b/confs/scala-native-0.3.9-SNAPSHOT/build.sbt new file mode 100644 index 0000000..b4a5690 --- /dev/null +++ b/confs/scala-native-0.3.9-SNAPSHOT/build.sbt @@ -0,0 +1,5 @@ +scalaVersion := "2.11.12" +enablePlugins(ScalaNativePlugin) +nativeLinkStubs := true +nativeGC := "immix" +nativeMode := "release" diff --git a/confs/scala-native-0.3.9-SNAPSHOT/compile b/confs/scala-native-0.3.9-SNAPSHOT/compile new file mode 100644 index 0000000..2f3f09f --- /dev/null +++ b/confs/scala-native-0.3.9-SNAPSHOT/compile @@ -0,0 +1 @@ +nativeLink diff --git a/confs/scala-native-0.3.9-SNAPSHOT/plugins.sbt b/confs/scala-native-0.3.9-SNAPSHOT/plugins.sbt new file mode 100644 index 0000000..c1423b6 --- /dev/null +++ b/confs/scala-native-0.3.9-SNAPSHOT/plugins.sbt @@ -0,0 +1 @@ +addSbtPlugin("org.scala-native" % "sbt-scala-native" % "0.3.9-SNAPSHOT") diff --git a/confs/scala-native-0.3.9-SNAPSHOT/run b/confs/scala-native-0.3.9-SNAPSHOT/run new file mode 100644 index 0000000..ae89e34 --- /dev/null +++ b/confs/scala-native-0.3.9-SNAPSHOT/run @@ -0,0 +1 @@ +target/scala-2.11/scala-native-benchmarks-out From 8d6c28a444e4ea0caf65b362b20d6311b4b88efa Mon Sep 17 00:00:00 2001 From: Valdis Date: Fri, 21 Sep 2018 13:03:54 +0200 Subject: [PATCH 04/39] using latest snapshot and latest stable version --- confs/scala-native-0.3.7/plugins.sbt | 1 - confs/{scala-native-0.3.7 => scala-native-0.3.8}/build.sbt | 0 confs/{scala-native-0.3.7 => scala-native-0.3.8}/compile | 0 confs/scala-native-0.3.8/plugins.sbt | 1 + confs/{scala-native-0.3.7 => scala-native-0.3.8}/run | 0 scripts/run.py | 3 ++- 6 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 confs/scala-native-0.3.7/plugins.sbt rename confs/{scala-native-0.3.7 => scala-native-0.3.8}/build.sbt (100%) rename confs/{scala-native-0.3.7 => 
scala-native-0.3.8}/compile (100%) create mode 100644 confs/scala-native-0.3.8/plugins.sbt rename confs/{scala-native-0.3.7 => scala-native-0.3.8}/run (100%) diff --git a/confs/scala-native-0.3.7/plugins.sbt b/confs/scala-native-0.3.7/plugins.sbt deleted file mode 100644 index afc9d5a..0000000 --- a/confs/scala-native-0.3.7/plugins.sbt +++ /dev/null @@ -1 +0,0 @@ -addSbtPlugin("org.scala-native" % "sbt-scala-native" % "0.3.7") diff --git a/confs/scala-native-0.3.7/build.sbt b/confs/scala-native-0.3.8/build.sbt similarity index 100% rename from confs/scala-native-0.3.7/build.sbt rename to confs/scala-native-0.3.8/build.sbt diff --git a/confs/scala-native-0.3.7/compile b/confs/scala-native-0.3.8/compile similarity index 100% rename from confs/scala-native-0.3.7/compile rename to confs/scala-native-0.3.8/compile diff --git a/confs/scala-native-0.3.8/plugins.sbt b/confs/scala-native-0.3.8/plugins.sbt new file mode 100644 index 0000000..2d38aa0 --- /dev/null +++ b/confs/scala-native-0.3.8/plugins.sbt @@ -0,0 +1 @@ +addSbtPlugin("org.scala-native" % "sbt-scala-native" % "0.3.8") diff --git a/confs/scala-native-0.3.7/run b/confs/scala-native-0.3.8/run similarity index 100% rename from confs/scala-native-0.3.7/run rename to confs/scala-native-0.3.8/run diff --git a/scripts/run.py b/scripts/run.py index 1b0a265..1ee3444 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -62,7 +62,8 @@ def compile(bench, compilecmd): configurations = [ 'jvm', - 'scala-native-0.3.7', + 'scala-native-0.3.8', + 'scala-native-0.3.9-SNAPSHOT', ] if 'GRAALVM_HOME' in os.environ: From 43915f138cfbd339b943a97e274879b2b25a2bde Mon Sep 17 00:00:00 2001 From: Valdis Date: Fri, 21 Sep 2018 14:18:31 +0200 Subject: [PATCH 05/39] enable running just a part of the benchmarks --- scripts/run.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/scripts/run.py b/scripts/run.py index 1ee3444..9e889f8 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -60,14 +60,20 @@ def compile(bench, compilecmd): 'sudoku.SudokuBenchmark', ] -configurations = [ + +baseline = [ 'jvm', 'scala-native-0.3.8', +] + +latest = [ 'scala-native-0.3.9-SNAPSHOT', ] +configurations = baseline + latest + if 'GRAALVM_HOME' in os.environ: - configurations += [ + baseline += [ 'native-image', 'native-image-pgo', ] @@ -77,6 +83,11 @@ def compile(bench, compilecmd): batch_size = 1 if __name__ == "__main__": + if "baseline" in sys.argv: + configurations = baseline + elif "latest" in sys.argv: + configurations = latest + for conf in configurations: for bench in benchmarks: print('--- conf: {}, bench: {}'.format(conf, bench)) @@ -92,7 +103,7 @@ def compile(bench, compilecmd): os.remove('build.sbt') if os.path.exists(os.path.join('confs', conf, 'plugins.sbt')): - sh.copyfile(os.path.join('confs', conf, 'plugins.sbt'), 'project/build.sbt') + sh.copyfile(os.path.join('confs', conf, 'plugins.sbt'), 'project/plugins.sbt') else: os.remove('project/plugins.sbt') From 8f21f8b4716f51da32178de0f350d4a93b52d068 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sat, 22 Sep 2018 15:17:31 +0200 Subject: [PATCH 06/39] dependency install script --- scripts/install-dependecies.sh | 2 ++ 1 file changed, 2 insertions(+) create mode 100755 scripts/install-dependecies.sh diff --git a/scripts/install-dependecies.sh b/scripts/install-dependecies.sh new file mode 100755 index 0000000..c0dea34 --- /dev/null +++ b/scripts/install-dependecies.sh @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +sudo pip2 install numpy matplotlib \ No newline at end of file From 
62bfd926d9e2027ad030bbc7d4412ab3884724e8 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sat, 22 Sep 2018 15:18:08 +0200 Subject: [PATCH 07/39] attempt at a barchart --- scripts/run.py | 4 ++-- scripts/summary.py | 21 ++++++++++++++++++++- 2 files changed, 22 insertions(+), 3 deletions(-) mode change 100644 => 100755 scripts/summary.py diff --git a/scripts/run.py b/scripts/run.py index 9e889f8..749ef7f 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 import sys import os import errno @@ -62,7 +62,7 @@ def compile(bench, compilecmd): baseline = [ - 'jvm', +# 'jvm', 'scala-native-0.3.8', ] diff --git a/scripts/summary.py b/scripts/summary.py old mode 100644 new mode 100755 index b953295..1eaa024 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -1,6 +1,9 @@ +#!/usr/bin/env python2 from run import benchmarks, runs, configurations import numpy as np +import matplotlib +import matplotlib.pyplot as plt def config_data(bench, conf): out = [] @@ -33,11 +36,27 @@ def peak_performance(): out.append(res) return out +def p50_chart(plt): + ind = np.arange(len(benchmarks)) + for conf in configurations: + res = [] + for bench in benchmarks: + try: + res.append(np.percentile(config_data(bench, conf), 50)) + except IndexError: + res.append(0) + plt.bar(ind, res, align='center', label=conf) + plt.xticks(ind, map(lambda x: x.split(".")[0],benchmarks)) + plt.legend() + plt.show() + + if __name__ == '__main__': leading = ['name'] for conf in configurations: leading.append(conf) print ','.join(leading) for bench, res in zip(benchmarks, peak_performance()): - print ','.join([bench] + list(map(str, res))) + print ','.join([bench.split(".")[0]] + list(map(str, res))) + p50_chart(plt) From a901a9725e2f53b95a090b5aaf3257a80465c70d Mon Sep 17 00:00:00 2001 From: Valdis Date: Sat, 22 Sep 2018 15:18:08 +0200 Subject: [PATCH 08/39] attempt at a barchart --- scripts/run.py | 2 +- scripts/summary.py | 21 ++++++++++++++++++++- 2 files changed, 21 insertions(+), 2 deletions(-) mode change 100644 => 100755 scripts/summary.py diff --git a/scripts/run.py b/scripts/run.py index 9e889f8..14fde56 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 import sys import os import errno diff --git a/scripts/summary.py b/scripts/summary.py old mode 100644 new mode 100755 index b953295..1eaa024 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -1,6 +1,9 @@ +#!/usr/bin/env python2 from run import benchmarks, runs, configurations import numpy as np +import matplotlib +import matplotlib.pyplot as plt def config_data(bench, conf): out = [] @@ -33,11 +36,27 @@ def peak_performance(): out.append(res) return out +def p50_chart(plt): + ind = np.arange(len(benchmarks)) + for conf in configurations: + res = [] + for bench in benchmarks: + try: + res.append(np.percentile(config_data(bench, conf), 50)) + except IndexError: + res.append(0) + plt.bar(ind, res, align='center', label=conf) + plt.xticks(ind, map(lambda x: x.split(".")[0],benchmarks)) + plt.legend() + plt.show() + + if __name__ == '__main__': leading = ['name'] for conf in configurations: leading.append(conf) print ','.join(leading) for bench, res in zip(benchmarks, peak_performance()): - print ','.join([bench] + list(map(str, res))) + print ','.join([bench.split(".")[0]] + list(map(str, res))) + p50_chart(plt) From 7cd093a1b287af0fa2d7bf86b90ac6e1f122e0d8 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 13:10:40 +0200 Subject: [PATCH 09/39] save charts 
to reports --- .gitignore | 1 + scripts/install-dependecies.sh | 3 +- scripts/summary.py | 63 ++++++++++++++++++++++++++++------ 3 files changed, 55 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index c37ddf5..dfc26b9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ bin/.coursier bin/.scalafmt* results/ +reports/ *.iprof .idea \ No newline at end of file diff --git a/scripts/install-dependecies.sh b/scripts/install-dependecies.sh index c0dea34..3b348d2 100755 --- a/scripts/install-dependecies.sh +++ b/scripts/install-dependecies.sh @@ -1,2 +1,3 @@ #!/usr/bin/env bash -sudo pip2 install numpy matplotlib \ No newline at end of file +sudo pip2 install numpy matplotlib +sudo apt update && sudo apt install python-tk \ No newline at end of file diff --git a/scripts/summary.py b/scripts/summary.py index 1eaa024..84376da 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -1,10 +1,11 @@ #!/usr/bin/env python2 -from run import benchmarks, runs, configurations +from run import benchmarks, runs, configurations, mkdir import numpy as np import matplotlib import matplotlib.pyplot as plt + def config_data(bench, conf): out = [] for run in xrange(runs): @@ -24,39 +25,79 @@ def config_data(bench, conf): pass return np.array(out) -def peak_performance(): + +def hot_config_data(bench, conf): + out = [] + for run in xrange(runs): + try: + points = [] + with open('results/{}/{}/{}'.format(conf, bench, run)) as data: + for line in data.readlines(): + points.append(float(line)) + # take only last 1000 to account for startup + out += points[-1000:] + except IOError: + pass + return np.array(out) + + +def peak_performance(percentile): out = [] for bench in benchmarks: res = [] for conf in configurations: try: - res.append(np.percentile(config_data(bench, conf), 50)) + res.append(np.percentile(config_data(bench, conf), percentile)) except IndexError: res.append(0) out.append(res) return out -def p50_chart(plt): + +# not good +def bar_chart(plt, percentile): ind = np.arange(len(benchmarks)) for conf in configurations: res = [] for bench in benchmarks: try: - res.append(np.percentile(config_data(bench, conf), 50)) + res.append(np.percentile(config_data(bench, conf), percentile)) except IndexError: res.append(0) plt.bar(ind, res, align='center', label=conf) - plt.xticks(ind, map(lambda x: x.split(".")[0],benchmarks)) + plt.xticks(ind, map(benchmark_short_name, benchmarks)) plt.legend() - plt.show() + return plt -if __name__ == '__main__': +def percentiles_chart(plt, bench, limit=99): + for conf in configurations: + data = hot_config_data(bench, conf) + percentiles = np.arange(0, limit) + percvalue = np.array([np.percentile(data, perc) for perc in percentiles]) + plt.plot(percentiles, percvalue, label = conf) + plt.legend() + plt.title(bench) + return plt + +def print_table(data): leading = ['name'] for conf in configurations: leading.append(conf) print ','.join(leading) - for bench, res in zip(benchmarks, peak_performance()): - print ','.join([bench.split(".")[0]] + list(map(str, res))) - p50_chart(plt) + for bench, res in zip(benchmarks, data): + print ','.join([benchmark_short_name(bench)] + list(map(str, res))) + + +def benchmark_short_name(bench): + return bench.split(".")[0] + +if __name__ == '__main__': + print_table(peak_performance(50)) + # bar_chart(plt, 50).show() + mkdir("reports") + for bench in benchmarks: + percentiles_chart(plt, bench).savefig("reports/percentile_" + bench + ".png") + plt.clf() + plt.cla() From 7d81c5779de98053064598da7f15fa15ded243ef Mon Sep 17 
00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 13:31:45 +0200 Subject: [PATCH 10/39] include jvm as well --- scripts/run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run.py b/scripts/run.py index 749ef7f..0107187 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -62,7 +62,7 @@ def compile(bench, compilecmd): baseline = [ -# 'jvm', + 'jvm', 'scala-native-0.3.8', ] From 03bed2e4343ff84582130dde3a40ada87a4b26a6 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 13:34:28 +0200 Subject: [PATCH 11/39] do not remove 1% outliers when doing percentiles anyway --- scripts/summary.py | 37 +++++++++++-------------------------- 1 file changed, 11 insertions(+), 26 deletions(-) diff --git a/scripts/summary.py b/scripts/summary.py index 84376da..c91fcd5 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -2,31 +2,12 @@ from run import benchmarks, runs, configurations, mkdir import numpy as np +import time import matplotlib import matplotlib.pyplot as plt def config_data(bench, conf): - out = [] - for run in xrange(runs): - try: - points = [] - with open('results/{}/{}/{}'.format(conf, bench, run)) as data: - for line in data.readlines(): - points.append(float(line)) - # take only last 1000 to account for startup - points = points[-1000:] - # filter out 1% worst measurements as outliers - pmax = np.percentile(points, 99) - for point in points: - if point <= pmax: - out.append(point) - except IOError: - pass - return np.array(out) - - -def hot_config_data(bench, conf): out = [] for run in xrange(runs): try: @@ -41,7 +22,7 @@ def hot_config_data(bench, conf): return np.array(out) -def peak_performance(percentile): +def percentile(percentile): out = [] for bench in benchmarks: res = [] @@ -72,14 +53,17 @@ def bar_chart(plt, percentile): def percentiles_chart(plt, bench, limit=99): for conf in configurations: - data = hot_config_data(bench, conf) + data = config_data(bench, conf) percentiles = np.arange(0, limit) percvalue = np.array([np.percentile(data, perc) for perc in percentiles]) - plt.plot(percentiles, percvalue, label = conf) + plt.plot(percentiles, percvalue, label=conf) plt.legend() plt.title(bench) + plt.xlabel("Percentile (%)") + plt.ylabel("Run time (s)") return plt + def print_table(data): leading = ['name'] for conf in configurations: @@ -94,10 +78,11 @@ def benchmark_short_name(bench): if __name__ == '__main__': - print_table(peak_performance(50)) + print_table(percentile(50)) # bar_chart(plt, 50).show() - mkdir("reports") + rootdir = "reports/summary_" + time.strftime('%Y%m%d_%H%M%S') + "/" + mkdir(rootdir) for bench in benchmarks: - percentiles_chart(plt, bench).savefig("reports/percentile_" + bench + ".png") + percentiles_chart(plt, bench).savefig(rootdir + "percentile_" + bench + ".png") plt.clf() plt.cla() From 263d4b40413a93c0f4ad9c57aa509e06b3601725 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 16:58:03 +0200 Subject: [PATCH 12/39] all the charts inside markdown --- scripts/summary.py | 55 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/scripts/summary.py b/scripts/summary.py index c91fcd5..da974bf 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -3,8 +3,10 @@ import numpy as np import time +import sys import matplotlib import matplotlib.pyplot as plt +import os def config_data(bench, conf): @@ -59,7 +61,7 @@ def percentiles_chart(plt, bench, limit=99): plt.plot(percentiles, percvalue, label=conf) plt.legend() plt.title(bench) - 
plt.xlabel("Percentile (%)") + plt.xlabel("Percentile") plt.ylabel("Run time (s)") return plt @@ -70,7 +72,26 @@ def print_table(data): leading.append(conf) print ','.join(leading) for bench, res in zip(benchmarks, data): - print ','.join([benchmark_short_name(bench)] + list(map(str, res))) + print ','.join([bench] + list(map(str, res))) + + +def write_md_table(file, data): + leading = ['name'] + for conf in configurations: + leading.append(conf) + file.write('|') + file.write(' | '.join(leading)) + file.write('|\n') + + file.write('|') + for _ in leading: + file.write(' -- |') + file.write('\n') + + for bench, res in zip(benchmarks, data): + file.write('|') + file.write('|'.join([bench] + list(map(str, res)))) + file.write('|\n') def benchmark_short_name(bench): @@ -78,11 +99,31 @@ def benchmark_short_name(bench): if __name__ == '__main__': + if len(sys.argv) > 1: + configurations = sys.argv[1:] print_table(percentile(50)) # bar_chart(plt, 50).show() - rootdir = "reports/summary_" + time.strftime('%Y%m%d_%H%M%S') + "/" + rootdir = "reports/summary_" + time.strftime('%Y%m%d_%H%M%S') + "_" + "_vs_".join(configurations) + "/" mkdir(rootdir) - for bench in benchmarks: - percentiles_chart(plt, bench).savefig(rootdir + "percentile_" + bench + ".png") - plt.clf() - plt.cla() + with open(os.path.join(rootdir, "Readme.md"), 'w+') as md_file: + md_file.write("# Summary\n") + md_file.write("## Benchmark run time (s) at 50 percentile \n") + write_md_table(md_file, percentile(50)) + md_file.write("## Benchmark run time (s) at 90 percentile \n") + write_md_table(md_file, percentile(90)) + md_file.write("## Benchmark run time (s) at 99 percentile \n") + write_md_table(md_file, percentile(99)) + + md_file.write("# Individual benchmarks\n") + for bench in benchmarks: + md_file.write("## ") + md_file.write(bench) + md_file.write("\n") + + chart_name = "percentile_" + bench + ".png" + chart_file = rootdir + chart_name + percentiles_chart(plt, bench).savefig(chart_file) + plt.clf() + plt.cla() + + md_file.write("![Chart]({})\n".format(chart_name)) \ No newline at end of file From 5abefad07051602a0f10118ffceaa12b2ea2ac6c Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 17:25:15 +0200 Subject: [PATCH 13/39] links, using seconds, correct yaxis --- scripts/summary.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/scripts/summary.py b/scripts/summary.py index da974bf..964c484 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -16,7 +16,7 @@ def config_data(bench, conf): points = [] with open('results/{}/{}/{}'.format(conf, bench, run)) as data: for line in data.readlines(): - points.append(float(line)) + points.append(float(line) / 1000000) # take only last 1000 to account for startup out += points[-1000:] except IOError: @@ -61,6 +61,7 @@ def percentiles_chart(plt, bench, limit=99): plt.plot(percentiles, percvalue, label=conf) plt.legend() plt.title(bench) + plt.ylim(ymin=0) plt.xlabel("Percentile") plt.ylabel("Run time (s)") return plt @@ -90,10 +91,14 @@ def write_md_table(file, data): for bench, res in zip(benchmarks, data): file.write('|') - file.write('|'.join([bench] + list(map(str, res)))) + file.write('|'.join([benchmark_md_link(bench)] + list(map(str, res)))) file.write('|\n') +def benchmark_md_link(bench): + return "[{}]({})".format(bench, bench.replace(".","").lower()) + + def benchmark_short_name(bench): return bench.split(".")[0] From 97588e3715e8b405b84f1aa15470a0e740f78ce1 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 17:49:55 +0200 
Subject: [PATCH 14/39] decimal places and replative difference --- scripts/summary.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/scripts/summary.py b/scripts/summary.py index 964c484..36a51e1 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -77,26 +77,35 @@ def print_table(data): def write_md_table(file, data): - leading = ['name'] - for conf in configurations: - leading.append(conf) + header = ['name'] + header.append(configurations[0]) + for conf in configurations[1:]: + header.append(conf) + header.append("") file.write('|') - file.write(' | '.join(leading)) + file.write(' | '.join(header)) file.write('|\n') file.write('|') - for _ in leading: + for _ in header: file.write(' -- |') file.write('\n') - for bench, res in zip(benchmarks, data): + for bench, res0 in zip(benchmarks, data): + base = res0[0] + res = [("%.4f" % base)] + sum(map(lambda x: cell(x, base), res0[1:]), []) file.write('|') - file.write('|'.join([benchmark_md_link(bench)] + list(map(str, res)))) + file.write('|'.join([benchmark_md_link(bench)] + list(res))) file.write('|\n') +def cell(x, base): + percent_diff = (float(x) / base - 1) * 100 + return [("%.4f" % x), ("+" if percent_diff > 0 else "") + ("%.2f" % percent_diff) + "%"] + + def benchmark_md_link(bench): - return "[{}]({})".format(bench, bench.replace(".","").lower()) + return "[{}]({})".format(bench, bench.replace(".", "").lower()) def benchmark_short_name(bench): @@ -131,4 +140,4 @@ def benchmark_short_name(bench): plt.clf() plt.cla() - md_file.write("![Chart]({})\n".format(chart_name)) \ No newline at end of file + md_file.write("![Chart]({})\n".format(chart_name)) From c92239963224cd761c41d92266c7b0a8d6517f0b Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 18:02:19 +0200 Subject: [PATCH 15/39] make it bold when there is an improvement --- scripts/summary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/summary.py b/scripts/summary.py index 36a51e1..d674a60 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -101,7 +101,7 @@ def write_md_table(file, data): def cell(x, base): percent_diff = (float(x) / base - 1) * 100 - return [("%.4f" % x), ("+" if percent_diff > 0 else "") + ("%.2f" % percent_diff) + "%"] + return [("%.4f" % x), ("+" if percent_diff > 0 else "__") + ("%.2f" % percent_diff) + "%" + ("" if percent_diff > 0 else "__")] def benchmark_md_link(bench): From 91c236ffd7bb6b33681437e5cad7c20b615e990e Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 18:29:02 +0200 Subject: [PATCH 16/39] relative bar chart --- scripts/summary.py | 81 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 57 insertions(+), 24 deletions(-) diff --git a/scripts/summary.py b/scripts/summary.py index d674a60..13ec850 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -53,6 +53,34 @@ def bar_chart(plt, percentile): return plt +def bar_chart_relative(plt, percentile): + ind = np.arange(len(benchmarks)) + conf_count = len(configurations) + 1 + base = [] + ref = [] + for bench in benchmarks: + try: + base.append(np.percentile(config_data(bench, configurations[0]), percentile)) + ref.append(1.0) + except IndexError: + base.append(0) + ref.append(0.0) + plt.bar(ind * conf_count, ref, label=configurations[0]) + + for i,conf in enumerate(configurations[1:]): + res = [] + for bench, base_val in zip(benchmarks, base): + try: + res.append(np.percentile(config_data(bench, conf), percentile) / base_val) + except IndexError: + res.append(0) + 
plt.bar(ind * conf_count + i + 1, res, label=conf) + plt.xticks((ind * conf_count + (conf_count - 1)/2.0), map(benchmark_short_name, benchmarks)) + plt.title("Relative performance against " + configurations[0] + " at " + str(percentile) + " percentile") + plt.legend() + return plt + + def percentiles_chart(plt, bench, limit=99): for conf in configurations: data = config_data(bench, conf) @@ -101,7 +129,8 @@ def write_md_table(file, data): def cell(x, base): percent_diff = (float(x) / base - 1) * 100 - return [("%.4f" % x), ("+" if percent_diff > 0 else "__") + ("%.2f" % percent_diff) + "%" + ("" if percent_diff > 0 else "__")] + return [("%.4f" % x), + ("+" if percent_diff > 0 else "__") + ("%.2f" % percent_diff) + "%" + ("" if percent_diff > 0 else "__")] def benchmark_md_link(bench): @@ -112,32 +141,36 @@ def benchmark_short_name(bench): return bench.split(".")[0] +def write_md_file(md_file): + md_file.write("# Summary\n") + md_file.write("## Benchmark run time (s) at 50 percentile \n") + write_md_table(md_file, percentile(50)) + md_file.write("## Benchmark run time (s) at 90 percentile \n") + write_md_table(md_file, percentile(90)) + md_file.write("## Benchmark run time (s) at 99 percentile \n") + write_md_table(md_file, percentile(99)) + md_file.write("# Individual benchmarks\n") + for bench in benchmarks: + md_file.write("## ") + md_file.write(bench) + md_file.write("\n") + + chart_name = "percentile_" + bench + ".png" + chart_file = rootdir + chart_name + percentiles_chart(plt, bench).savefig(chart_file) + plt.clf() + plt.cla() + + md_file.write("![Chart]({})\n".format(chart_name)) + + if __name__ == '__main__': if len(sys.argv) > 1: configurations = sys.argv[1:] - print_table(percentile(50)) + # print_table(percentile(50)) # bar_chart(plt, 50).show() rootdir = "reports/summary_" + time.strftime('%Y%m%d_%H%M%S') + "_" + "_vs_".join(configurations) + "/" mkdir(rootdir) - with open(os.path.join(rootdir, "Readme.md"), 'w+') as md_file: - md_file.write("# Summary\n") - md_file.write("## Benchmark run time (s) at 50 percentile \n") - write_md_table(md_file, percentile(50)) - md_file.write("## Benchmark run time (s) at 90 percentile \n") - write_md_table(md_file, percentile(90)) - md_file.write("## Benchmark run time (s) at 99 percentile \n") - write_md_table(md_file, percentile(99)) - - md_file.write("# Individual benchmarks\n") - for bench in benchmarks: - md_file.write("## ") - md_file.write(bench) - md_file.write("\n") - - chart_name = "percentile_" + bench + ".png" - chart_file = rootdir + chart_name - percentiles_chart(plt, bench).savefig(chart_file) - plt.clf() - plt.cla() - - md_file.write("![Chart]({})\n".format(chart_name)) + # with open(os.path.join(rootdir, "Readme.md"), 'w+') as md_file: + # write_md_file(md_file) + bar_chart_relative(plt, 50).show() From 2869c687a20ab54094c2db50a759ca8eeb55c897 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 18:59:11 +0200 Subject: [PATCH 17/39] good bar charts for summary --- scripts/summary.py | 48 +++++++++++++++++----------------------------- 1 file changed, 18 insertions(+), 30 deletions(-) diff --git a/scripts/summary.py b/scripts/summary.py index 13ec850..8436a2f 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -37,22 +37,6 @@ def percentile(percentile): return out -# not good -def bar_chart(plt, percentile): - ind = np.arange(len(benchmarks)) - for conf in configurations: - res = [] - for bench in benchmarks: - try: - res.append(np.percentile(config_data(bench, conf), percentile)) - except IndexError: - 
res.append(0) - plt.bar(ind, res, align='center', label=conf) - plt.xticks(ind, map(benchmark_short_name, benchmarks)) - plt.legend() - return plt - - def bar_chart_relative(plt, percentile): ind = np.arange(len(benchmarks)) conf_count = len(configurations) + 1 @@ -67,7 +51,7 @@ def bar_chart_relative(plt, percentile): ref.append(0.0) plt.bar(ind * conf_count, ref, label=configurations[0]) - for i,conf in enumerate(configurations[1:]): + for i, conf in enumerate(configurations[1:]): res = [] for bench, base_val in zip(benchmarks, base): try: @@ -75,8 +59,8 @@ def bar_chart_relative(plt, percentile): except IndexError: res.append(0) plt.bar(ind * conf_count + i + 1, res, label=conf) - plt.xticks((ind * conf_count + (conf_count - 1)/2.0), map(benchmark_short_name, benchmarks)) - plt.title("Relative performance against " + configurations[0] + " at " + str(percentile) + " percentile") + plt.xticks((ind * conf_count + (conf_count - 1) / 2.0), map(benchmark_short_name, benchmarks)) + plt.title("Relative test execution times against " + configurations[0] + " at " + str(percentile) + " percentile") plt.legend() return plt @@ -141,14 +125,19 @@ def benchmark_short_name(bench): return bench.split(".")[0] -def write_md_file(md_file): +def write_md_file(rootdir, md_file): md_file.write("# Summary\n") - md_file.write("## Benchmark run time (s) at 50 percentile \n") - write_md_table(md_file, percentile(50)) - md_file.write("## Benchmark run time (s) at 90 percentile \n") - write_md_table(md_file, percentile(90)) - md_file.write("## Benchmark run time (s) at 99 percentile \n") - write_md_table(md_file, percentile(99)) + for p in [50, 90, 99]: + md_file.write("## Benchmark run time (s) at {} percentile \n".format(p)) + chart_name = "relative_percentile_" + str(p) + ".png" + bar_chart_relative(plt, p).savefig(rootdir + chart_name) + plt.clf() + plt.cla() + + md_file.write("![Chart]({})\n\n".format(chart_name)) + + write_md_table(md_file, percentile(p)) + md_file.write("# Individual benchmarks\n") for bench in benchmarks: md_file.write("## ") @@ -168,9 +157,8 @@ def write_md_file(md_file): if len(sys.argv) > 1: configurations = sys.argv[1:] # print_table(percentile(50)) - # bar_chart(plt, 50).show() + plt.rcParams["figure.figsize"] = [16.0, 12.0] rootdir = "reports/summary_" + time.strftime('%Y%m%d_%H%M%S') + "_" + "_vs_".join(configurations) + "/" mkdir(rootdir) - # with open(os.path.join(rootdir, "Readme.md"), 'w+') as md_file: - # write_md_file(md_file) - bar_chart_relative(plt, 50).show() + with open(os.path.join(rootdir, "Readme.md"), 'w+') as md_file: + write_md_file(rootdir, md_file) From aaf0246b396ea781181a88816b8cd47102d31f02 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 19:34:07 +0200 Subject: [PATCH 18/39] fixed aligment --- scripts/run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run.py b/scripts/run.py index 0107187..14fde56 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -62,7 +62,7 @@ def compile(bench, compilecmd): baseline = [ - 'jvm', + 'jvm', 'scala-native-0.3.8', ] From 5930f514b506c5656af36ad254d9d6bc9791bd44 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 21:01:34 +0200 Subject: [PATCH 19/39] proper argument parsing --- scripts/run.py | 39 +++++++++++++++++++++++++++------------ scripts/summary.py | 33 +++++++++++++++++++++------------ 2 files changed, 48 insertions(+), 24 deletions(-) diff --git a/scripts/run.py b/scripts/run.py index 14fde56..0626a8e 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -4,6 +4,7 @@ 
import errno import subprocess as subp import shutil as sh +import argparse def mkdir(path): try: @@ -60,17 +61,16 @@ def compile(bench, compilecmd): 'sudoku.SudokuBenchmark', ] - +stable = 'scala-native-0.3.8' baseline = [ - 'jvm', - 'scala-native-0.3.8', + 'jvm', + stable, ] -latest = [ - 'scala-native-0.3.9-SNAPSHOT', -] +latest = 'scala-native-0.3.9-SNAPSHOT' + -configurations = baseline + latest +configurations = all_configs = baseline + [latest] if 'GRAALVM_HOME' in os.environ: baseline += [ @@ -83,10 +83,25 @@ def compile(bench, compilecmd): batch_size = 1 if __name__ == "__main__": - if "baseline" in sys.argv: - configurations = baseline - elif "latest" in sys.argv: - configurations = latest + parser = argparse.ArgumentParser() + parser.add_argument("--suffix", help="suffix added to results") + parser.add_argument("set", nargs='*', choices=configurations + ["baseline", "latest", "stable", "all"], + default="all") + args = parser.parse_args() + + if args.set != all_configs: + configurations = [] + for choice in args.set: + if choice == "baseline": + configurations += baseline + elif choice == "latest" in args.set: + configurations += [latest] + elif choice == "stable" in args.set: + configurations += [stable] + else: + configurations + [choice] + else: + configurations = all_configs for conf in configurations: for bench in benchmarks: @@ -108,7 +123,7 @@ def compile(bench, compilecmd): os.remove('project/plugins.sbt') compile(bench, compilecmd) - resultsdir = os.path.join('results', conf, bench) + resultsdir = os.path.join('results', conf + "_" + args.suffix, bench) mkdir(resultsdir) for n in xrange(runs): diff --git a/scripts/summary.py b/scripts/summary.py index 8436a2f..65cadc4 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -1,5 +1,5 @@ #!/usr/bin/env python2 -from run import benchmarks, runs, configurations, mkdir +from run import benchmarks, runs, mkdir, all_configs, latest, stable import numpy as np import time @@ -24,7 +24,7 @@ def config_data(bench, conf): return np.array(out) -def percentile(percentile): +def percentile(configurations, percentile): out = [] for bench in benchmarks: res = [] @@ -37,7 +37,7 @@ def percentile(percentile): return out -def bar_chart_relative(plt, percentile): +def bar_chart_relative(plt, configurations, percentile): ind = np.arange(len(benchmarks)) conf_count = len(configurations) + 1 base = [] @@ -65,7 +65,7 @@ def bar_chart_relative(plt, percentile): return plt -def percentiles_chart(plt, bench, limit=99): +def percentiles_chart(plt, configurations, bench, limit=99): for conf in configurations: data = config_data(bench, conf) percentiles = np.arange(0, limit) @@ -79,7 +79,7 @@ def percentiles_chart(plt, bench, limit=99): return plt -def print_table(data): +def print_table(configurations, data): leading = ['name'] for conf in configurations: leading.append(conf) @@ -88,7 +88,7 @@ def print_table(data): print ','.join([bench] + list(map(str, res))) -def write_md_table(file, data): +def write_md_table(file, configurations, data): header = ['name'] header.append(configurations[0]) for conf in configurations[1:]: @@ -125,18 +125,18 @@ def benchmark_short_name(bench): return bench.split(".")[0] -def write_md_file(rootdir, md_file): +def write_md_file(rootdir, md_file, configurations): md_file.write("# Summary\n") for p in [50, 90, 99]: md_file.write("## Benchmark run time (s) at {} percentile \n".format(p)) chart_name = "relative_percentile_" + str(p) + ".png" - bar_chart_relative(plt, p).savefig(rootdir + chart_name) + 
bar_chart_relative(plt, configurations, p).savefig(rootdir + chart_name) plt.clf() plt.cla() md_file.write("![Chart]({})\n\n".format(chart_name)) - write_md_table(md_file, percentile(p)) + write_md_table(md_file, configurations, percentile(configurations, p)) md_file.write("# Individual benchmarks\n") for bench in benchmarks: @@ -146,7 +146,7 @@ def write_md_file(rootdir, md_file): chart_name = "percentile_" + bench + ".png" chart_file = rootdir + chart_name - percentiles_chart(plt, bench).savefig(chart_file) + percentiles_chart(plt, configurations, bench).savefig(chart_file) plt.clf() plt.cla() @@ -154,11 +154,20 @@ def write_md_file(rootdir, md_file): if __name__ == '__main__': + configurations = [] if len(sys.argv) > 1: - configurations = sys.argv[1:] + for arg in sys.argv[1:]: + if arg == "latest": + configurations += [latest] + elif arg == "stable": + configurations += [stable] + else: + configurations += arg + else: + configurations = all_configs # print_table(percentile(50)) plt.rcParams["figure.figsize"] = [16.0, 12.0] rootdir = "reports/summary_" + time.strftime('%Y%m%d_%H%M%S') + "_" + "_vs_".join(configurations) + "/" mkdir(rootdir) with open(os.path.join(rootdir, "Readme.md"), 'w+') as md_file: - write_md_file(rootdir, md_file) + write_md_file(rootdir, md_file, configurations) From 994d1adf3c4b90e02c146cf67f26ad8767201dcb Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 21:16:39 +0200 Subject: [PATCH 20/39] prints resulting report path --- scripts/summary.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/scripts/summary.py b/scripts/summary.py index 65cadc4..c352b48 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -165,9 +165,10 @@ def write_md_file(rootdir, md_file, configurations): configurations += arg else: configurations = all_configs - # print_table(percentile(50)) plt.rcParams["figure.figsize"] = [16.0, 12.0] - rootdir = "reports/summary_" + time.strftime('%Y%m%d_%H%M%S') + "_" + "_vs_".join(configurations) + "/" - mkdir(rootdir) - with open(os.path.join(rootdir, "Readme.md"), 'w+') as md_file: - write_md_file(rootdir, md_file, configurations) + report_dir = "reports/summary_" + time.strftime('%Y%m%d_%H%M%S') + "_" + "_vs_".join(configurations) + "/" + mkdir(report_dir) + with open(os.path.join(report_dir, "Readme.md"), 'w+') as md_file: + write_md_file(report_dir, md_file, configurations) + + print report_dir \ No newline at end of file From 97ec186175a804e9ca274fadc6241b6249f971d2 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 21:26:13 +0200 Subject: [PATCH 21/39] allow "latest" and "stable" as dynamic suffixes --- scripts/summary.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/summary.py b/scripts/summary.py index c352b48..ecbf735 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -157,10 +157,10 @@ def write_md_file(rootdir, md_file, configurations): configurations = [] if len(sys.argv) > 1: for arg in sys.argv[1:]: - if arg == "latest": - configurations += [latest] - elif arg == "stable": - configurations += [stable] + if arg.startswith("latest"): + configurations += [latest + arg[len("latest"):]] + elif arg.startswith("stable"): + configurations += [stable + arg[len("stable"):]] else: configurations += arg else: From d0b004e03da8e08608103af745415940ba8eecd2 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 21:26:25 +0200 Subject: [PATCH 22/39] formatting --- scripts/run.py | 47 ++++++++++++++++++++++++++--------------------- 1 file changed, 
26 insertions(+), 21 deletions(-) diff --git a/scripts/run.py b/scripts/run.py index 0626a8e..07f86ad 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -6,19 +6,22 @@ import shutil as sh import argparse + def mkdir(path): try: os.makedirs(path) - except OSError as exc: # Python >2.5 + except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise + def slurp(path): with open(path) as f: return f.read().strip() + def where(cmd): if os.path.isfile(cmd): return cmd @@ -31,34 +34,37 @@ def where(cmd): else: return None + def run(cmd): print(">>> " + str(cmd)) return subp.check_output(cmd) + def compile(bench, compilecmd): cmd = [sbt, '-J-Xmx2G', 'clean'] cmd.append('set mainClass in Compile := Some("{}")'.format(bench)) cmd.append(compilecmd) return run(cmd) + sbt = where('sbt') benchmarks = [ - 'bounce.BounceBenchmark', - 'list.ListBenchmark', - 'richards.RichardsBenchmark', - 'queens.QueensBenchmark', - 'permute.PermuteBenchmark', - 'deltablue.DeltaBlueBenchmark', - 'tracer.TracerBenchmark', - 'brainfuck.BrainfuckBenchmark', - 'json.JsonBenchmark', - 'cd.CDBenchmark', - 'kmeans.KmeansBenchmark', - 'gcbench.GCBenchBenchmark', - 'mandelbrot.MandelbrotBenchmark', - 'nbody.NbodyBenchmark', - 'sudoku.SudokuBenchmark', + 'bounce.BounceBenchmark', + 'list.ListBenchmark', + 'richards.RichardsBenchmark', + 'queens.QueensBenchmark', + 'permute.PermuteBenchmark', + 'deltablue.DeltaBlueBenchmark', + 'tracer.TracerBenchmark', + 'brainfuck.BrainfuckBenchmark', + 'json.JsonBenchmark', + 'cd.CDBenchmark', + 'kmeans.KmeansBenchmark', + 'gcbench.GCBenchBenchmark', + 'mandelbrot.MandelbrotBenchmark', + 'nbody.NbodyBenchmark', + 'sudoku.SudokuBenchmark', ] stable = 'scala-native-0.3.8' @@ -69,13 +75,12 @@ def compile(bench, compilecmd): latest = 'scala-native-0.3.9-SNAPSHOT' - configurations = all_configs = baseline + [latest] if 'GRAALVM_HOME' in os.environ: baseline += [ - 'native-image', - 'native-image-pgo', + 'native-image', + 'native-image-pgo', ] runs = 20 @@ -110,7 +115,8 @@ def compile(bench, compilecmd): input = slurp(os.path.join('input', bench)) output = slurp(os.path.join('output', bench)) compilecmd = slurp(os.path.join('confs', conf, 'compile')) - runcmd = slurp(os.path.join('confs', conf, 'run')).replace('$BENCH', bench).replace('$HOME', os.environ['HOME']).split(' ') + runcmd = slurp(os.path.join('confs', conf, 'run')).replace('$BENCH', bench).replace('$HOME', os.environ[ + 'HOME']).split(' ') if os.path.exists(os.path.join('confs', conf, 'build.sbt')): sh.copyfile(os.path.join('confs', conf, 'build.sbt'), 'build.sbt') @@ -135,4 +141,3 @@ def compile(bench, compilecmd): out = run(cmd) with open(os.path.join(resultsdir, str(n)), 'w+') as resultfile: resultfile.write(out) - From 5eb427e80aa3ea251ca040d2f6d63a2488050544 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 21:44:34 +0200 Subject: [PATCH 23/39] can change the default comment on the suffix --- scripts/summary.py | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/scripts/summary.py b/scripts/summary.py index ecbf735..5ba84ea 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -7,6 +7,7 @@ import matplotlib import matplotlib.pyplot as plt import os +import argparse def config_data(bench, conf): @@ -154,19 +155,38 @@ def write_md_file(rootdir, md_file, configurations): if __name__ == '__main__': + dirs = next(os.walk("results"))[1] + results = dirs + for dir in dirs: + if dir.startswith(latest): + results += ["latest" + 
dir[len(latest):]] + if dir.startswith(stable): + results += ["stable" + dir[len(stable):]] + + parser = argparse.ArgumentParser() + parser.add_argument("--comment", help="comment at the suffix of the report name") + parser.add_argument("comparisons", nargs='*', choices= results + ["all"], + default="all") + args = parser.parse_args() + configurations = [] - if len(sys.argv) > 1: - for arg in sys.argv[1:]: + if args.comparisons == "all": + configurations = all_configs + else: + for arg in args.comparisons: if arg.startswith("latest"): configurations += [latest + arg[len("latest"):]] elif arg.startswith("stable"): configurations += [stable + arg[len("stable"):]] else: configurations += arg - else: - configurations = all_configs + + comment = "_vs_".join(configurations) + if args.comment is not None: + comment = args.comment + + report_dir = "reports/summary_" + time.strftime('%Y%m%d_%H%M%S') + "_" + comment + "/" plt.rcParams["figure.figsize"] = [16.0, 12.0] - report_dir = "reports/summary_" + time.strftime('%Y%m%d_%H%M%S') + "_" + "_vs_".join(configurations) + "/" mkdir(report_dir) with open(os.path.join(report_dir, "Readme.md"), 'w+') as md_file: write_md_file(report_dir, md_file, configurations) From 9787a17e73ca2e836593aa809b8d7edd1ff3235e Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 22:17:48 +0200 Subject: [PATCH 24/39] include example runs --- scripts/summary.py | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/scripts/summary.py b/scripts/summary.py index 5ba84ea..8c5ff12 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -39,6 +39,8 @@ def percentile(configurations, percentile): def bar_chart_relative(plt, configurations, percentile): + plt.clf() + plt.cla() ind = np.arange(len(benchmarks)) conf_count = len(configurations) + 1 base = [] @@ -66,7 +68,30 @@ def bar_chart_relative(plt, configurations, percentile): return plt +def example_run_plot(plt, configurations, bench, run=3): + plt.clf() + plt.cla() + + for conf in configurations: + points = [] + try: + with open('results/{}/{}/{}'.format(conf, bench, run)) as data: + for line in data.readlines(): + points.append(float(line) / 1000000) + except IOError: + pass + ind = np.arange(len(points)) + plt.plot(ind, points, label=conf) + plt.title("{} run #{}".format(bench, str(run))) + plt.xlabel("Iteration") + plt.ylabel("Run time (s)") + plt.legend() + return plt + + def percentiles_chart(plt, configurations, bench, limit=99): + plt.clf() + plt.cla() for conf in configurations: data = config_data(bench, conf) percentiles = np.arange(0, limit) @@ -132,8 +157,6 @@ def write_md_file(rootdir, md_file, configurations): md_file.write("## Benchmark run time (s) at {} percentile \n".format(p)) chart_name = "relative_percentile_" + str(p) + ".png" bar_chart_relative(plt, configurations, p).savefig(rootdir + chart_name) - plt.clf() - plt.cla() md_file.write("![Chart]({})\n\n".format(chart_name)) @@ -148,11 +171,14 @@ def write_md_file(rootdir, md_file, configurations): chart_name = "percentile_" + bench + ".png" chart_file = rootdir + chart_name percentiles_chart(plt, configurations, bench).savefig(chart_file) - plt.clf() - plt.cla() md_file.write("![Chart]({})\n".format(chart_name)) + chart_name = "example_run_3_" + bench + ".png" + chart_file = rootdir + chart_name + example_run_plot(plt, configurations, bench).savefig(chart_file) + md_file.write("![Chart]({})\n".format(chart_name)) + if __name__ == '__main__': dirs = next(os.walk("results"))[1] From 
b1075105a803662fe33ec7ac1e3d74c02893a8f4 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 22:22:26 +0200 Subject: [PATCH 25/39] fix links --- scripts/summary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/summary.py b/scripts/summary.py index 8c5ff12..a905837 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -144,7 +144,7 @@ def cell(x, base): def benchmark_md_link(bench): - return "[{}]({})".format(bench, bench.replace(".", "").lower()) + return "[{}](#{})".format(bench, bench.replace(".", "").lower()) def benchmark_short_name(bench): From 0683808e7d4eb79aabdbe5f7bee3e94d9b34c75b Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 23 Sep 2018 22:48:11 +0200 Subject: [PATCH 26/39] explain usage --- README.md | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index d200b09..053e3b9 100644 --- a/README.md +++ b/README.md @@ -13,8 +13,75 @@ as-is and summarized in a separate post-processing step. python scripts/run.py ``` -## Viewing result summary +## Creating result summary ``` python scripts/summary.py ``` + +The reports can be viewed in the `reports` directory. + +## Advanced use + +### Comparing specific versions + +You can run just the configurations you are interested in +```bash +scripts/run.py stable latest +``` + +Compare the lastest `stable` relesea vs `latest` snapshot +```bash +REPORT=$(scripts/summary.py stable latest) +``` + +### Comparing an experimental feature with latest from master +1. build `scala-native` from latest master +2. run the benchmark for it +```bash +scripts/run.py latest +``` +3. build `scala-native` from your branch +4. specify a suffix to identify it +```bash +NAME=PR9001-adding-a-kitchen-sink +``` +5. run the benchmark and get the summary report +```bash +scripts/run.py --suffix "$NAME" latest && +REPORT=$(scripts/summary.py --comment "$NAME" latest latest_"$NAME") +``` + +## Persisting reports +The following commands assume that you have a git repository checked out at `gh-pages` under `../scala-native-benchmark-results`. + +Also that there is an executable script `just-upload.sh` in the root of that repository. +```bash +#just-upload.sh + + +#!/bin/bash +# move to the directory of the script +cd $(dirname "$0") + +git add . 
&& +git commit -m "automated commit" && git push +``` + +### saving experiment data +```bash +cp -r results/ ../scala-native-benchmark-results && +../scala-native-benchmark-results/just-upload.sh +``` + +### restoring experiment data +```bash +cp -r ../scala-native-benchmark-results results/ +``` + +### uploading a report +```bash +mkdir -p ../scala-native-benchmark-results/reports +cp -r "$REPORT" ../scala-native-benchmark-results/reports && +../scala-native-benchmark-results/just-upload.sh +``` \ No newline at end of file From 6800fd588e4dd649f60ae69afeaac0e36d8f67b0 Mon Sep 17 00:00:00 2001 From: Valdis Date: Tue, 25 Sep 2018 13:46:14 +0200 Subject: [PATCH 27/39] handle empty suffixes --- scripts/run.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/run.py b/scripts/run.py index 07f86ad..1a9caf4 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -129,7 +129,11 @@ def compile(bench, compilecmd): os.remove('project/plugins.sbt') compile(bench, compilecmd) - resultsdir = os.path.join('results', conf + "_" + args.suffix, bench) + suffix = "" + if args.suffix is not None: + suffix = "_" + args.suffix + + resultsdir = os.path.join('results', conf + suffix, bench) mkdir(resultsdir) for n in xrange(runs): From ee834a6a55e558871c73069ef8f27cd7f4e0a50c Mon Sep 17 00:00:00 2001 From: Valdis Date: Tue, 25 Sep 2018 22:35:49 +0200 Subject: [PATCH 28/39] support 2 new lto variants: full and thin --- .../build.sbt | 6 +++ .../compile | 1 + .../plugins.sbt | 1 + .../scala-native-0.3.9-SNAPSHOT-full-tlo/run | 1 + .../build.sbt | 6 +++ .../compile | 1 + .../plugins.sbt | 1 + .../scala-native-0.3.9-SNAPSHOT-thin-tlo/run | 1 + scripts/run.py | 51 ++++++++++++++----- scripts/summary.py | 18 ++----- 10 files changed, 59 insertions(+), 28 deletions(-) create mode 100644 confs/scala-native-0.3.9-SNAPSHOT-full-tlo/build.sbt create mode 100644 confs/scala-native-0.3.9-SNAPSHOT-full-tlo/compile create mode 100644 confs/scala-native-0.3.9-SNAPSHOT-full-tlo/plugins.sbt create mode 100644 confs/scala-native-0.3.9-SNAPSHOT-full-tlo/run create mode 100644 confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/build.sbt create mode 100644 confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/compile create mode 100644 confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/plugins.sbt create mode 100644 confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/run diff --git a/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/build.sbt b/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/build.sbt new file mode 100644 index 0000000..fae449a --- /dev/null +++ b/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/build.sbt @@ -0,0 +1,6 @@ +scalaVersion := "2.11.12" +enablePlugins(ScalaNativePlugin) +nativeLinkStubs := true +nativeGC := "immix" +nativeMode := "release" +nativeLTO := "full" diff --git a/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/compile b/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/compile new file mode 100644 index 0000000..2f3f09f --- /dev/null +++ b/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/compile @@ -0,0 +1 @@ +nativeLink diff --git a/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/plugins.sbt b/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/plugins.sbt new file mode 100644 index 0000000..c1423b6 --- /dev/null +++ b/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/plugins.sbt @@ -0,0 +1 @@ +addSbtPlugin("org.scala-native" % "sbt-scala-native" % "0.3.9-SNAPSHOT") diff --git a/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/run b/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/run new file mode 100644 index 0000000..ae89e34 --- /dev/null +++ 
b/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/run @@ -0,0 +1 @@ +target/scala-2.11/scala-native-benchmarks-out diff --git a/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/build.sbt b/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/build.sbt new file mode 100644 index 0000000..ae87f31 --- /dev/null +++ b/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/build.sbt @@ -0,0 +1,6 @@ +scalaVersion := "2.11.12" +enablePlugins(ScalaNativePlugin) +nativeLinkStubs := true +nativeGC := "immix" +nativeMode := "release" +nativeLTO := "thin" diff --git a/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/compile b/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/compile new file mode 100644 index 0000000..2f3f09f --- /dev/null +++ b/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/compile @@ -0,0 +1 @@ +nativeLink diff --git a/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/plugins.sbt b/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/plugins.sbt new file mode 100644 index 0000000..c1423b6 --- /dev/null +++ b/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/plugins.sbt @@ -0,0 +1 @@ +addSbtPlugin("org.scala-native" % "sbt-scala-native" % "0.3.9-SNAPSHOT") diff --git a/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/run b/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/run new file mode 100644 index 0000000..ae89e34 --- /dev/null +++ b/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/run @@ -0,0 +1 @@ +target/scala-2.11/scala-native-benchmarks-out diff --git a/scripts/run.py b/scripts/run.py index 1a9caf4..3a4dea8 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -68,43 +68,66 @@ def compile(bench, compilecmd): ] stable = 'scala-native-0.3.8' +latest = 'scala-native-0.3.9-SNAPSHOT' baseline = [ 'jvm', stable, ] -latest = 'scala-native-0.3.9-SNAPSHOT' +confs_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + "/confs" -configurations = all_configs = baseline + [latest] +configurations = all_configs = next(os.walk(confs_path))[1] + +graalvm = [ + 'native-image', + 'native-image-pgo', +] if 'GRAALVM_HOME' in os.environ: - baseline += [ - 'native-image', - 'native-image-pgo', - ] + baseline += graalvm +else: + for g in graalvm: + all_configs.remove(g) runs = 20 batches = 3000 batch_size = 1 + +def expand_wild_cards(arg): + if arg.startswith("latest"): + return latest + arg[len("latest"):] + elif arg.startswith("stable"): + return stable + arg[len("stable"):] + else: + return arg + + +def generate_choices(direct_choices): + results = direct_choices + for dir in direct_choices: + if dir.startswith(latest): + results += ["latest" + dir[len(latest):]] + if dir.startswith(stable): + results += ["stable" + dir[len(stable):]] + return results + + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--suffix", help="suffix added to results") - parser.add_argument("set", nargs='*', choices=configurations + ["baseline", "latest", "stable", "all"], + parser.add_argument("set", nargs='*', choices=generate_choices(configurations) + ["baseline", "all"], default="all") args = parser.parse_args() - if args.set != all_configs: + if args.set != "all": configurations = [] for choice in args.set: - if choice == "baseline": + expanded = expand_wild_cards(choice) + if expanded == "baseline": configurations += baseline - elif choice == "latest" in args.set: - configurations += [latest] - elif choice == "stable" in args.set: - configurations += [stable] else: - configurations + [choice] + configurations += [expanded] else: configurations = all_configs diff --git a/scripts/summary.py b/scripts/summary.py index a905837..ce61859 100755 --- 
a/scripts/summary.py +++ b/scripts/summary.py @@ -1,5 +1,5 @@ #!/usr/bin/env python2 -from run import benchmarks, runs, mkdir, all_configs, latest, stable +from run import benchmarks, runs, mkdir, expand_wild_cards, generate_choices import numpy as np import time @@ -181,13 +181,8 @@ def write_md_file(rootdir, md_file, configurations): if __name__ == '__main__': - dirs = next(os.walk("results"))[1] - results = dirs - for dir in dirs: - if dir.startswith(latest): - results += ["latest" + dir[len(latest):]] - if dir.startswith(stable): - results += ["stable" + dir[len(stable):]] + all_configs = next(os.walk("results"))[1] + results = generate_choices(all_configs) parser = argparse.ArgumentParser() parser.add_argument("--comment", help="comment at the suffix of the report name") @@ -200,12 +195,7 @@ def write_md_file(rootdir, md_file, configurations): configurations = all_configs else: for arg in args.comparisons: - if arg.startswith("latest"): - configurations += [latest + arg[len("latest"):]] - elif arg.startswith("stable"): - configurations += [stable + arg[len("stable"):]] - else: - configurations += arg + configurations += [expand_wild_cards(arg)] comment = "_vs_".join(configurations) if args.comment is not None: From 2c6b9950086b741189a472e76da7a3f65ffad9f3 Mon Sep 17 00:00:00 2001 From: Valdis Date: Thu, 27 Sep 2018 16:43:28 +0200 Subject: [PATCH 29/39] fixed typo in the config names --- .../build.sbt | 0 .../compile | 0 .../plugins.sbt | 0 .../run | 0 .../build.sbt | 0 .../compile | 0 .../plugins.sbt | 0 .../run | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename confs/{scala-native-0.3.9-SNAPSHOT-full-tlo => scala-native-0.3.9-SNAPSHOT-full-lto}/build.sbt (100%) rename confs/{scala-native-0.3.9-SNAPSHOT-full-tlo => scala-native-0.3.9-SNAPSHOT-full-lto}/compile (100%) rename confs/{scala-native-0.3.9-SNAPSHOT-full-tlo => scala-native-0.3.9-SNAPSHOT-full-lto}/plugins.sbt (100%) rename confs/{scala-native-0.3.9-SNAPSHOT-full-tlo => scala-native-0.3.9-SNAPSHOT-full-lto}/run (100%) rename confs/{scala-native-0.3.9-SNAPSHOT-thin-tlo => scala-native-0.3.9-SNAPSHOT-thin-lto}/build.sbt (100%) rename confs/{scala-native-0.3.9-SNAPSHOT-thin-tlo => scala-native-0.3.9-SNAPSHOT-thin-lto}/compile (100%) rename confs/{scala-native-0.3.9-SNAPSHOT-thin-tlo => scala-native-0.3.9-SNAPSHOT-thin-lto}/plugins.sbt (100%) rename confs/{scala-native-0.3.9-SNAPSHOT-thin-tlo => scala-native-0.3.9-SNAPSHOT-thin-lto}/run (100%) diff --git a/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/build.sbt b/confs/scala-native-0.3.9-SNAPSHOT-full-lto/build.sbt similarity index 100% rename from confs/scala-native-0.3.9-SNAPSHOT-full-tlo/build.sbt rename to confs/scala-native-0.3.9-SNAPSHOT-full-lto/build.sbt diff --git a/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/compile b/confs/scala-native-0.3.9-SNAPSHOT-full-lto/compile similarity index 100% rename from confs/scala-native-0.3.9-SNAPSHOT-full-tlo/compile rename to confs/scala-native-0.3.9-SNAPSHOT-full-lto/compile diff --git a/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/plugins.sbt b/confs/scala-native-0.3.9-SNAPSHOT-full-lto/plugins.sbt similarity index 100% rename from confs/scala-native-0.3.9-SNAPSHOT-full-tlo/plugins.sbt rename to confs/scala-native-0.3.9-SNAPSHOT-full-lto/plugins.sbt diff --git a/confs/scala-native-0.3.9-SNAPSHOT-full-tlo/run b/confs/scala-native-0.3.9-SNAPSHOT-full-lto/run similarity index 100% rename from confs/scala-native-0.3.9-SNAPSHOT-full-tlo/run rename to confs/scala-native-0.3.9-SNAPSHOT-full-lto/run diff --git 
a/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/build.sbt b/confs/scala-native-0.3.9-SNAPSHOT-thin-lto/build.sbt similarity index 100% rename from confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/build.sbt rename to confs/scala-native-0.3.9-SNAPSHOT-thin-lto/build.sbt diff --git a/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/compile b/confs/scala-native-0.3.9-SNAPSHOT-thin-lto/compile similarity index 100% rename from confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/compile rename to confs/scala-native-0.3.9-SNAPSHOT-thin-lto/compile diff --git a/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/plugins.sbt b/confs/scala-native-0.3.9-SNAPSHOT-thin-lto/plugins.sbt similarity index 100% rename from confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/plugins.sbt rename to confs/scala-native-0.3.9-SNAPSHOT-thin-lto/plugins.sbt diff --git a/confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/run b/confs/scala-native-0.3.9-SNAPSHOT-thin-lto/run similarity index 100% rename from confs/scala-native-0.3.9-SNAPSHOT-thin-tlo/run rename to confs/scala-native-0.3.9-SNAPSHOT-thin-lto/run From cf0a4392e178cecac6c9e6abc806665bba7f5f49 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 30 Sep 2018 14:20:17 +0200 Subject: [PATCH 30/39] extra arguments --- scripts/run.py | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/scripts/run.py b/scripts/run.py index 3a4dea8..99fb0a0 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -89,8 +89,9 @@ def compile(bench, compilecmd): for g in graalvm: all_configs.remove(g) -runs = 20 -batches = 3000 +default_runs = 20 +default_batches = 3000 +default_par = 1 batch_size = 1 @@ -116,10 +117,27 @@ def generate_choices(direct_choices): if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--suffix", help="suffix added to results") + parser.add_argument("--runs", help="number of runs", type=int, default=default_runs) + parser.add_argument("--batches", help="number of batches per run", type=int, default=default_batches) + parser.add_argument("--par", help="number of parallel processes per run", type=int, default=default_par) parser.add_argument("set", nargs='*', choices=generate_choices(configurations) + ["baseline", "all"], default="all") args = parser.parse_args() + runs = args.runs + batches = args.batches + par = args.par + + suffix = "" + if runs != default_runs: + suffix += "-r" + runs + if batches != default_batches: + suffix += "-b" + batches + if par != default_par: + suffix += "-p" + par + if args.suffix is not None: + suffix += "_" + args.suffix + if args.set != "all": configurations = [] for choice in args.set: @@ -152,11 +170,9 @@ def generate_choices(direct_choices): os.remove('project/plugins.sbt') compile(bench, compilecmd) - suffix = "" - if args.suffix is not None: - suffix = "_" + args.suffix - resultsdir = os.path.join('results', conf + suffix, bench) + + resultsdir = os.path.join('results', conf + suffix, bench) mkdir(resultsdir) for n in xrange(runs): From 5aa7f74fbeb2e51bfe3b99c5754b55aede91b34c Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 30 Sep 2018 14:40:47 +0200 Subject: [PATCH 31/39] handling failures --- scripts/run.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/scripts/run.py b/scripts/run.py index 99fb0a0..6a9cd95 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -149,6 +149,8 @@ def generate_choices(direct_choices): else: configurations = all_configs + failed = [] + for conf in configurations: for bench in benchmarks: print('--- conf: {}, bench: {}'.format(conf, 
bench)) @@ -171,7 +173,6 @@ def generate_choices(direct_choices): compile(bench, compilecmd) - resultsdir = os.path.join('results', conf + suffix, bench) mkdir(resultsdir) @@ -181,6 +182,19 @@ def generate_choices(direct_choices): cmd = [] cmd.extend(runcmd) cmd.extend([str(batches), str(batch_size), input, output]) - out = run(cmd) - with open(os.path.join(resultsdir, str(n)), 'w+') as resultfile: - resultfile.write(out) + try: + out = run(cmd) + with open(os.path.join(resultsdir, str(n)), 'w+') as resultfile: + resultfile.write(out) + except subp.CalledProcessError as err: + out = err.output + print "Failure!" + print out + with open(os.path.join(resultsdir, str(n) + ".failed"), 'w+') as failfile: + failfile.write(out) + failed += [dict(conf=conf, bench=bench, run=n)] + if len(failed) > 0: + print("{} benchmarks failed ".format(len(failed))) + for fail in failed: + print fail + exit(1) \ No newline at end of file From 781d36c4712da60ef02d8ce37c80858611585da9 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 30 Sep 2018 14:48:20 +0200 Subject: [PATCH 32/39] added missing string conversions --- scripts/run.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/run.py b/scripts/run.py index 6a9cd95..e9a548e 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -130,11 +130,11 @@ def generate_choices(direct_choices): suffix = "" if runs != default_runs: - suffix += "-r" + runs + suffix += "-r" + str(runs) if batches != default_batches: - suffix += "-b" + batches + suffix += "-b" + str(batches) if par != default_par: - suffix += "-p" + par + suffix += "-p" + str(par) if args.suffix is not None: suffix += "_" + args.suffix From e5e3da92fa36d415b8bc37fa8afbf3356406d7d3 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 30 Sep 2018 15:30:37 +0200 Subject: [PATCH 33/39] added parallel execution --- scripts/run.py | 61 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 43 insertions(+), 18 deletions(-) diff --git a/scripts/run.py b/scripts/run.py index e9a548e..3721c19 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -5,6 +5,7 @@ import subprocess as subp import shutil as sh import argparse +import multiprocessing as mp def mkdir(path): @@ -114,12 +115,35 @@ def generate_choices(direct_choices): return results + +def single_run(to_run): + n = to_run["n"] + runs = to_run["runs"] + cmd = to_run["cmd"] + resultsdir = to_run["resultsdir"] + conf = to_run["conf"] + bench = to_run["bench"] + + print('--- run {}/{}'.format(n, runs)) + try: + out = run(cmd) + with open(os.path.join(resultsdir, str(n)), 'w+') as resultfile: + resultfile.write(out) + return [] + except subp.CalledProcessError as err: + out = err.output + print "Failure!"
+ print out + with open(os.path.join(resultsdir, str(n) + ".failed"), 'w+') as failfile: + failfile.write(out) + return [dict(conf=conf, bench=bench, run=n)] + + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--suffix", help="suffix added to results") parser.add_argument("--runs", help="number of runs", type=int, default=default_runs) parser.add_argument("--batches", help="number of batches per run", type=int, default=default_batches) - parser.add_argument("--par", help="number of parallel processes per run", type=int, default=default_par) + parser.add_argument("--par", help="number of parallel processes", type=int, default=default_par) parser.add_argument("set", nargs='*', choices=generate_choices(configurations) + ["baseline", "all"], default="all") args = parser.parse_args() @@ -150,6 +174,9 @@ def generate_choices(direct_choices): configurations = all_configs failed = [] + pool = None + if par > 1: + pool = mp.Pool(par) for conf in configurations: for bench in benchmarks: @@ -176,25 +203,23 @@ def generate_choices(direct_choices): resultsdir = os.path.join('results', conf + suffix, bench) mkdir(resultsdir) + cmd = [] + cmd.extend(runcmd) + cmd.extend([str(batches), str(batch_size), input, output]) + + to_run = [] for n in xrange(runs): - print('--- run {}/{}'.format(n, runs)) - - cmd = [] - cmd.extend(runcmd) - cmd.extend([str(batches), str(batch_size), input, output]) - try: - out = run(cmd) - with open(os.path.join(resultsdir, str(n)), 'w+') as resultfile: - resultfile.write(out) - except subp.CalledProcessError as err: - out = err.output - print "Failure!" - print out - with open(os.path.join(resultsdir, str(n) + ".failed"), 'w+') as failfile: - failfile.write(out) - failed += [dict(conf=conf, bench=bench, run=n)] + to_run += [dict(runs=runs, cmd=cmd, resultsdir=resultsdir, conf=conf, bench=bench, n=n)] + + + if par == 1: + for tr in to_run: + failed += single_run(tr) + else: + sum(pool.map(single_run, to_run),[]) + if len(failed) > 0: print("{} benchmarks failed ".format(len(failed))) for fail in failed: print fail - exit(1) \ No newline at end of file + exit(1) From c27bf954b928398fc80d0f080ef436f571f0a33d Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 30 Sep 2018 16:06:08 +0200 Subject: [PATCH 34/39] display the failed statistics for par as well --- scripts/run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run.py b/scripts/run.py index 3721c19..865c33f 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -216,7 +216,7 @@ def single_run(to_run): for tr in to_run: failed += single_run(tr) else: - sum(pool.map(single_run, to_run),[]) + failed += sum(pool.map(single_run, to_run),[]) if len(failed) > 0: print("{} benchmarks failed ".format(len(failed))) From 50186133a7e247293db22983f62791a36a1f5780 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 30 Sep 2018 16:06:55 +0200 Subject: [PATCH 35/39] removed unused import --- scripts/run.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/run.py b/scripts/run.py index 865c33f..100c836 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -1,5 +1,4 @@ #!/usr/bin/env python2 -import sys import os import errno import subprocess as subp From 80b83246418f84eddb7da5196c8de0c76c4cca99 Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 30 Sep 2018 18:23:07 +0200 Subject: [PATCH 36/39] scan for all successful runs --- scripts/summary.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/scripts/summary.py b/scripts/summary.py index 
ce61859..bcaf6ec 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -1,5 +1,5 @@ #!/usr/bin/env python2 -from run import benchmarks, runs, mkdir, expand_wild_cards, generate_choices +from run import benchmarks, mkdir, expand_wild_cards, generate_choices import numpy as np import time @@ -11,8 +11,14 @@ def config_data(bench, conf): + files = next(os.walk("results/{}/{}".format(conf, bench)))[2] + runs = [] + for file in files: + if not file.endswith(".failed"): + runs += [file] + out = [] - for run in xrange(runs): + for run in runs: try: points = [] with open('results/{}/{}/{}'.format(conf, bench, run)) as data: @@ -186,7 +192,7 @@ def write_md_file(rootdir, md_file, configurations): parser = argparse.ArgumentParser() parser.add_argument("--comment", help="comment at the suffix of the report name") - parser.add_argument("comparisons", nargs='*', choices= results + ["all"], + parser.add_argument("comparisons", nargs='*', choices=results + ["all"], default="all") args = parser.parse_args() @@ -207,4 +213,4 @@ def write_md_file(rootdir, md_file, configurations): with open(os.path.join(report_dir, "Readme.md"), 'w+') as md_file: write_md_file(report_dir, md_file, configurations) - print report_dir \ No newline at end of file + print report_dir From 60a15c67bb51487c68841f58a695999c40bd28ae Mon Sep 17 00:00:00 2001 From: Valdis Date: Sun, 30 Sep 2018 21:51:55 +0200 Subject: [PATCH 37/39] also collect stderr --- scripts/run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run.py b/scripts/run.py index 100c836..f22042f 100755 --- a/scripts/run.py +++ b/scripts/run.py @@ -37,7 +37,7 @@ def where(cmd): def run(cmd): print(">>> " + str(cmd)) - return subp.check_output(cmd) + return subp.check_output(cmd, stderr=subp.STDOUT) def compile(bench, compilecmd): From d451b94b67c7db792435264ac0b9d28e29835699 Mon Sep 17 00:00:00 2001 From: Valdis Date: Tue, 2 Oct 2018 11:41:18 +0200 Subject: [PATCH 38/39] readme --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 053e3b9..19a79d5 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,15 @@ Compare the latest `stable` release vs the `latest` snapshot REPORT=$(scripts/summary.py stable latest) ``` +### Specifying the number of runs, batches, and parallel processes +```bash +scripts/run.py --par 4 --runs 50 --batches 3000 stable +# 50 runs (4 in parallel) each with 3000 batches for the stable release. +``` + +These settings affect accuracy, which is why any non-default settings are appended to the results folder names, in this case `scala-native-0.3.8-r50-p4`. +Note that you can also use `stable-r50-p4` when using `summary.py`. + ### Comparing an experimental feature with latest from master 1. build `scala-native` from latest master 2.
run the benchmark for it From 733bd0d052c254e969dd0d9e9720d96c86ee332f Mon Sep 17 00:00:00 2001 From: Valdis Date: Wed, 3 Oct 2018 11:54:32 +0200 Subject: [PATCH 39/39] handle missing data --- scripts/summary.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/scripts/summary.py b/scripts/summary.py index bcaf6ec..2c38bd5 100755 --- a/scripts/summary.py +++ b/scripts/summary.py @@ -11,7 +11,7 @@ def config_data(bench, conf): - files = next(os.walk("results/{}/{}".format(conf, bench)))[2] + files = next(os.walk("results/{}/{}".format(conf, bench)), [[],[],[]])[2] runs = [] for file in files: if not file.endswith(".failed"): runs += [file] @@ -100,9 +100,10 @@ def percentiles_chart(plt, configurations, bench, limit=99): plt.cla() for conf in configurations: data = config_data(bench, conf) - percentiles = np.arange(0, limit) - percvalue = np.array([np.percentile(data, perc) for perc in percentiles]) - plt.plot(percentiles, percvalue, label=conf) + if data.size > 0: + percentiles = np.arange(0, limit) + percvalue = np.array([np.percentile(data, perc) for perc in percentiles]) + plt.plot(percentiles, percvalue, label=conf) plt.legend() plt.title(bench) plt.ylim(ymin=0)