bp-run: not using argument_handling.py anymore
madsbk committed May 2, 2017
1 parent 1aec49f commit 7e4da47
Showing 4 changed files with 85 additions and 35 deletions.
2 changes: 1 addition & 1 deletion README.rst
@@ -50,7 +50,7 @@ And run the script::
The result is a JSON file `results.json` that encapsulates the commands that make up the benchmark suite.
Now, use `bp-run` to run the benchmark suite::

$bp-run --output results.json
$bp-run results.json
Executing 'X-ray/10*10*1'
Executing 'X-ray/20*10*1'
Executing 'Bean/10000*10'
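For orientation, a minimal sketch of the suite layout that `bp-run` consumes, inferred only from the keys that `run.py` reads below (`cmd_list`, `label`, `jobs`, `status`, `nruns`, `filename`, `results`); files generated by benchpress may carry additional fields, and the values here are hypothetical:

# Hypothetical minimal layout of results.json, based on the keys run.py accesses.
suite = {
    "cmd_list": [
        {
            "label": "X-ray/10*10*1",        # printed as "Executing '<label>'"
            "jobs": [
                {
                    "status": "pending",     # set to 'finished' or 'failed' after execution
                    "nruns": 3,              # number of runs of the generated script
                    "filename": "/tmp/job_xray",  # hypothetical path of the script to execute
                    # 'results' is filled in by job_gather_results()
                },
            ],
        },
    ],
}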
100 changes: 73 additions & 27 deletions benchpress/run.py
@@ -2,9 +2,9 @@
from __future__ import absolute_import
import os
import json
import argparse
from subprocess import Popen, PIPE
from . import argument_handling
from .argument_handling import args


class C:
@@ -25,7 +25,7 @@ def write2json(json_file, obj):
os.fsync(json_file)


def job_execute_locally(job, verbose=False):
def job_execute_locally(job, verbose=False, dirty=False):
"""Execute the job locally"""

try:
@@ -44,7 +44,7 @@ def job_execute_locally(job, verbose=False):
p.wait()
except KeyboardInterrupt:
p.kill()
if not args().dirty:
if not dirty:
for i in range(job['nruns']):
base = "%s-%d" % (job['filename'], i)
stdout = "%s.out" % base
@@ -57,13 +57,13 @@ def job_execute_locally(job, verbose=False):
raise KeyboardInterrupt()
finally:
try:
if not args().dirty:
if not dirty:
os.remove(job['filename'])
except OSError:
pass


def job_gather_results(job):
def job_gather_results(job, dirty=False):
"""Gather the results of the bash job. NB: the job must be finished!"""

ret = []
@@ -84,7 +84,7 @@ def job_gather_results(job):
if len(result['stderr']) > 0:
print ("%sSTDERR:%s" % (C.WARN, C.END))
print ("%s\t%s%s" % (C.FAIL, result['stderr'].replace('\n', '\n\t'), C.END))
if not args().dirty:
if not dirty:
os.remove(stdout)
os.remove(stderr)
except IOError:
@@ -97,30 +97,76 @@
def main():
"""Run the commands in the '--output' JSON file not already finished"""

if args().output is None:
argument_handling.error("When running, please set argument '--output'")

print ("Running benchmark; results are written to: %s" % args().output)
parser = argparse.ArgumentParser(description='Runs a benchmark suite and stores the results in a JSON-file.')
parser.add_argument(
'suite',
type=argparse.FileType('r+'),
help='Path to the JSON file where the benchmark results will be read and written. '
'If the file exists, the benchmark will resume.'
)
parser.add_argument(
'--runs',
default=3,
type=int,
help="How many times should each command run."
)
parser.add_argument(
'--dirty',
action="store_true",
help="Do no clean up."
)

slurm_grp = parser.add_argument_group('SLURM Queuing System')
slurm_grp.add_argument(
'--slurm',
action="store_true",
help="Use the SLURM queuing system."
)
slurm_grp.add_argument(
'--partition',
type=str,
help="Submit to a specific SLURM partition."
)
slurm_grp.add_argument(
'--multi-jobs',
action="store_true",
help="Submit 'nruns' SLURM jobs instead of one job with 'nruns' number of runs."
)
slurm_grp.add_argument(
'--wait',
action="store_true",
help="Wait for all SLURM jobs to finished before returning."
)
slurm_grp.add_argument(
'--nice',
type=int,
help="The scheduling priority - range is from -10000 (highest priority) to "
"10000 (lowest priority) where zero is default. Only privileged "
"users can specify a negative priority.",
default=0
)
args = parser.parse_args()

print ("Running benchmark; results are written to: %s" % args.suite.name)
try:
with open(args().output, 'r+') as json_file:
suite = json.load(json_file)
cmd_list = suite['cmd_list']
for cmd in cmd_list:
for job in cmd['jobs']:
if job['status'] == 'pending':
# The user wants local execution
print ("Executing '%s'" % (cmd['label']))
job_execute_locally(job)
job['results'] = job_gather_results(job)
if all(res['success'] for res in job['results']):
job['status'] = 'finished'
else:
job['status'] = 'failed'
write2json(json_file, suite)
print ("%sFinished execution, result written in '%s'%s" % (C.WARN, args().output, C.END))
suite = json.load(args.suite)
cmd_list = suite['cmd_list']
for cmd in cmd_list:
for job in cmd['jobs']:
if job['status'] == 'pending':
# The user wants local execution
print ("Executing '%s'" % (cmd['label']))
job_execute_locally(job, dirty=args.dirty)
job['results'] = job_gather_results(job, dirty=args.dirty)
if all(res['success'] for res in job['results']):
job['status'] = 'finished'
else:
job['status'] = 'failed'
write2json(args.suite, suite)
print ("%sFinished execution, result written in '%s'%s" % (C.WARN, args.suite.name, C.END))
except KeyboardInterrupt:
print ("%sSuspending the benchmark execution, "
"continue with: 'bp-run --output %s'%s" % (C.WARN, args().output, C.END))
"continue with: 'bp-run %s'%s" % (C.WARN, args.suite.name, C.END))


if __name__ == "__main__":
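Once a run finishes (or is suspended and later resumed), the suite file can be inspected directly. A minimal sketch, assuming only the fields the code above writes ('cmd_list', 'label', 'jobs', 'status', and per-run 'results' entries carrying a 'success' flag); the path results.json is just an example:

import json

# Hypothetical post-run inspection of the JSON file maintained by bp-run.
with open("results.json") as f:
    suite = json.load(f)

for cmd in suite["cmd_list"]:
    for job in cmd["jobs"]:
        results = job.get("results", [])
        ok = sum(1 for res in results if res.get("success"))
        print("%s: %s (%d/%d runs succeeded)" % (cmd["label"], job["status"], ok, len(results)))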
2 changes: 1 addition & 1 deletion benchpress/suites/nas_parallel_benchmark.py
@@ -16,7 +16,7 @@
And then *run* the suite::
bp-run --output nas_suite.json
bp-run nas_suite.json
Finally, check the result::
16 changes: 10 additions & 6 deletions doc/source/usage_bp-run.out
@@ -1,14 +1,18 @@
usage: bp-run [-h] [--output RESULT_FILE] [--runs RUNS] [--slurm]
[--partition PARTITION] [--multi-jobs] [--wait] [--nice NICE]
usage: bp-run [-h] [--runs RUNS] [--dirty] [--slurm] [--partition PARTITION]
[--multi-jobs] [--wait] [--nice NICE]
suite

Runs a benchmark suite and stores the results in a JSON-file.

positional arguments:
suite Path to the JSON file where the benchmark results will
be read and written. If the file exists, the benchmark
will resume.

optional arguments:
-h, --help show this help message and exit
--output RESULT_FILE Path to the JSON file where the benchmark results will
be written. If the file exist, the benchmark will
resume.
--runs RUNS How many times should each command run.
--dirty Do not clean up.

SLURM Queuing System:
--slurm Use the SLURM queuing system.
@@ -20,4 +20,4 @@ SLURM Queuing System:
--nice NICE The scheduling priority - range is from -10000
(highest priority) to 10000 (lowest priority) where
zero is default. Only privileged users can specify a
negative priority.
negative priority.
