benchtests: improve argument parsing through argparse library
The argparse library is now used in the compare_bench script to improve command line
argument parsing. The schema validation file is now optional, reducing the number of
required parameters by one; the usage sketch below illustrates the change.

	* benchtests/scripts/compare_bench.py (__main__): Use the argparse
	library to improve command line parsing.
	(__main__): Make the schema file an optional parameter (--schema),
	defaulting to benchtests/scripts/benchout.schema.json.
	(main): Move the argument parsing out to __main__ and leave main
	only as the caller of the comparison functions.
Leonardo Sandoval committed Jul 19, 2018
1 parent e84bd85 commit 1cf4ae7
Showing 2 changed files with 28 additions and 21 deletions.
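
To make the new interface concrete, a usage sketch (the benchmark output file
names are hypothetical):

    Before: compare_bench.py benchout.schema.json bench-a.out bench-b.out 5
    After:  compare_bench.py bench-a.out bench-b.out --threshold 5

With no options given, --schema defaults to the benchout.schema.json that ships
next to the script, and --threshold defaults to 10.0.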
9 changes: 9 additions & 0 deletions ChangeLog
@@ -1,3 +1,12 @@
+2018-07-19  Leonardo Sandoval  <leonardo.sandoval.gonzalez@intel.com>
+
+	* benchtests/scripts/compare_bench.py (__main__): Use the argparse
+	library to improve command line parsing.
+	(__main__): Make the schema file an optional parameter (--schema),
+	defaulting to benchtests/scripts/benchout.schema.json.
+	(main): Move the argument parsing out to __main__ and leave main
+	only as the caller of the comparison functions.
+
 2018-07-19  H.J. Lu  <hongjiu.lu@intel.com>
 
 	* NEWS: Add a note for Intel CET status.
40 changes: 19 additions & 21 deletions benchtests/scripts/compare_bench.py
@@ -25,6 +25,7 @@
 import os
 import pylab
 import import_bench as bench
+import argparse
 
 def do_compare(func, var, tl1, tl2, par, threshold):
     """Compare one of the aggregate measurements
@@ -151,26 +152,9 @@ def plot_graphs(bench1, bench2):
         print('Writing out %s' % filename)
         pylab.savefig(filename)
 
-
-def main(args):
-    """Program Entry Point
-
-    Take two benchmark output files and compare their timings.
-    """
-    if len(args) > 4 or len(args) < 3:
-        print('Usage: %s <schema> <file1> <file2> [threshold in %%]' % sys.argv[0])
-        sys.exit(os.EX_USAGE)
-
-    bench1 = bench.parse_bench(args[1], args[0])
-    bench2 = bench.parse_bench(args[2], args[0])
-    if len(args) == 4:
-        threshold = float(args[3])
-    else:
-        threshold = 10.0
-
-    if (bench1['timing_type'] != bench2['timing_type']):
-        print('Cannot compare benchmark outputs: timing types are different')
-        return
+def main(bench1, bench2, schema, threshold):
+    bench1 = bench.parse_bench(bench1, schema)
+    bench2 = bench.parse_bench(bench2, schema)
 
     plot_graphs(bench1, bench2)
 
@@ -181,4 +165,18 @@ def main(args):
 
 
 if __name__ == '__main__':
-    main(sys.argv[1:])
+    parser = argparse.ArgumentParser(description='Take two benchmarks and compare their timings.')
+
+    # Required parameters
+    parser.add_argument('bench1', help='First bench to compare')
+    parser.add_argument('bench2', help='Second bench to compare')
+
+    # Optional parameters
+    parser.add_argument('--schema',
+                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'benchout.schema.json'),
+                        help='JSON file to validate source/dest files (default: %(default)s)')
+    parser.add_argument('--threshold', default=10.0, help='Only print those with equal or higher threshold (default: %(default)s)')
+
+    args = parser.parse_args()
+
+    main(args.bench1, args.bench2, args.schema, args.threshold)
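
For reference, here is the parsing scheme in isolation; a minimal runnable
sketch, not the glibc script itself. The input file names are made up, and
type=float is an assumption added here: as committed, --threshold reaches
main() as a str whenever it is passed on the command line, since only the
default value is a float.

import argparse
import os

# Mirror of the parser the commit introduces (sketch only).
parser = argparse.ArgumentParser(
    description='Take two benchmarks and compare their timings.')
parser.add_argument('bench1', help='First bench to compare')
parser.add_argument('bench2', help='Second bench to compare')
# The schema defaults to the benchout.schema.json next to the script,
# so it no longer has to be passed explicitly on every run.
parser.add_argument('--schema',
                    default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                         'benchout.schema.json'),
                    help='JSON file to validate source/dest files (default: %(default)s)')
# type=float is not in the committed patch; without it, argparse returns a
# string for an explicitly passed --threshold.
parser.add_argument('--threshold', default=10.0, type=float,
                    help='Only print those with equal or higher threshold (default: %(default)s)')

# Parse a hypothetical command line instead of sys.argv, for demonstration.
args = parser.parse_args(['bench-a.out', 'bench-b.out', '--threshold', '5'])
print(args.bench1, args.bench2, args.schema, args.threshold)
# Prints: bench-a.out bench-b.out <script dir>/benchout.schema.json 5.0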
