diff --git a/docs/usage.md b/docs/usage.md
index 315e51aa..3e738dd9 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -77,7 +77,7 @@ considered. Similarly, naming more benchmarks will include them all.
 
 ReBench supports a range of other options to control execution.
 
-#### Quick Runs, Iterations, Invocations
+#### Quick Runs, Iterations, Invocations, Building
 
 The [configuration](config.md#invocation) uses the notion of iteration and
 invocation to define how often an executor is started (invocation) and how many
@@ -93,6 +93,13 @@ We can override this setting with the following parameters:
                    within an executor execution.
 -q, --quick        Execute quickly. Identical with --iterations=1 --invocations=1
 
+--setup-only       Build all executors and suites, and run one benchmark for each
+                   executor to verify that all builds work.
+                   Implies --iterations=1 --invocations=1.
+
+-B, --without-building
+                   Disables execution of build commands for executors and suites.
+
 ```
 
 #### Niceness
diff --git a/rebench/configurator.py b/rebench/configurator.py
index b15c691b..cde033b6 100644
--- a/rebench/configurator.py
+++ b/rebench/configurator.py
@@ -158,9 +158,10 @@ def __init__(self, raw_config, data_store, ui, cli_options=None, cli_reporter=No
         # capture invocation and iteration settings and override when quick is selected
         invocations = cli_options.invocations if cli_options else None
         iterations = cli_options.iterations if cli_options else None
-        if cli_options and cli_options.quick:
-            invocations = 1
-            iterations = 1
+        if cli_options:
+            if cli_options.setup_only or cli_options.quick:
+                invocations = 1
+                iterations = 1
 
         self._root_run_details = ExpRunDetails.compile(
             raw_config.get('runs', {}), ExpRunDetails.default(
@@ -275,6 +276,18 @@ def get_runs(self):
         runs = set()
         for exp in list(self._experiments.values()):
             runs |= exp.get_runs()
+
+        if self._options and self._options.setup_only:
+            # keep only the runs needed to trigger every build command
+            runs_with_builds = set()
+            build_commands = set()
+
+            for run in runs:
+                commands = run.build_commands()
+                if not build_commands >= commands:
+                    runs_with_builds.add(run)
+                    build_commands.update(commands)
+            runs = runs_with_builds
         return runs
 
     def _compile_experiments(self, experiments):
diff --git a/rebench/model/run_id.py b/rebench/model/run_id.py
index 343c8bb5..d7faf133 100644
--- a/rebench/model/run_id.py
+++ b/rebench/model/run_id.py
@@ -116,6 +116,16 @@ def location(self):
             return None
         return self._expand_vars(self._benchmark.suite.location)
 
+    def build_commands(self):
+        commands = set()
+        builds = self._benchmark.suite.executor.build
+        if builds:
+            commands.update(builds)
+        builds = self._benchmark.suite.build
+        if builds:
+            commands.update(builds)
+        return commands
+
     def requires_warmup(self):
         return self._benchmark.run_details.warmup > 0
 
diff --git a/rebench/model/termination_check.py b/rebench/model/termination_check.py
index ea440101..bfbb64b7 100644
--- a/rebench/model/termination_check.py
+++ b/rebench/model/termination_check.py
@@ -55,7 +55,7 @@ def should_terminate(self, number_of_data_points):
             msg = "{ind}Execution has failed, benchmark is aborted.\n"
             if self._consecutive_erroneous_executions > 0:
                 msg += "{ind}{ind}The benchmark failed "
-                msg += str(self._consecutive_erroneous_executions) + " times in a row."
+                msg += str(self._consecutive_erroneous_executions) + " times in a row.\n"
             self._ui.warning(msg, self._run_id)
             return True
         elif self.has_too_many_failures(number_of_data_points):
diff --git a/rebench/rebench.py b/rebench/rebench.py
index 43b89be3..81975d33 100755
--- a/rebench/rebench.py
+++ b/rebench/rebench.py
@@ -91,6 +91,10 @@ def shell_options(self):
         execution = parser.add_argument_group(
             'Execution Options', 'Adapt how ReBench executes benchmarks')
+        execution.add_argument(
+            '-N', '--without-nice', action='store_false', dest='use_nice',
+            help='Used for debugging and environments without the tool nice.',
+            default=True)
         execution.add_argument(
             '-in', '--invocations', action='store', dest='invocations',
             help='The number of times an executor is started to execute a run.',
@@ -104,9 +108,11 @@ def shell_options(self):
             help='Execute quickly. Identical with --iterations=1 --invocations=1',
             default=False)
         execution.add_argument(
-            '-N', '--without-nice', action='store_false', dest='use_nice',
-            help='Used for debugging and environments without the tool nice.',
-            default=True)
+            '--setup-only', action='store_true', dest='setup_only',
+            help=('Build all executors and suites, and run one benchmark for each executor ' +
+                  'to verify that all builds work. ' +
+                  'Implies --iterations=1 --invocations=1.'),
+            default=False)
         execution.add_argument(
             '-B', '--without-building', action='store_false', dest='do_builds',
             help='Disables execution of build commands for executors and suites.',
             default=False)
diff --git a/rebench/tests/features/issue_110.conf b/rebench/tests/features/issue_110.conf
new file mode 100644
index 00000000..c3287304
--- /dev/null
+++ b/rebench/tests/features/issue_110.conf
@@ -0,0 +1,72 @@
+default_experiment: Complete
+
+build_log: build.log
+
+runs:
+    invocations: 10
+    min_iteration_time: 0
+
+benchmark_suites:
+    Suite1:
+        gauge_adapter: Time
+        command: " 1 "
+        benchmarks:
+            - BenchS1
+    Suite2:
+        gauge_adapter: Time
+        command: " 2 "
+        benchmarks:
+            - BenchS2
+    SuiteWithBuild:
+        gauge_adapter: Time
+        command: " 3 "
+        benchmarks:
+            - BenchWithBuild
+        build:
+            - echo Built Suite
+
+executors:
+    BashA:
+        executable: ./vm_110a.sh
+        args: foo bar 1
+        build:
+            - |
+                echo "#!/bin/bash" > vm_110a.sh
+                echo "echo \$@" >> vm_110a.sh
+                echo Built VM110A
+                chmod +x vm_110a.sh
+    BashB:
+        executable: ./vm_110b.sh
+        args: foo bar 2
+        build:
+            - |
+                echo "#!/bin/bash" > vm_110b.sh
+                echo "echo \$@" >> vm_110b.sh
+                echo Built VM110B
+                chmod +x vm_110b.sh
+
+experiments:
+    Complete:
+        suites:
+            - Suite1
+            - Suite2
+            - SuiteWithBuild
+        executions:
+            - BashA
+            - BashB
+    A1:
+        suites:
+            - Suite1
+        executions:
+            - BashA
+    B2:
+        suites:
+            - Suite2
+        executions:
+            - BashB
+
+    SuiteWithBuild:
+        suites:
+            - SuiteWithBuild
+        executions:
+            - BashA
diff --git a/rebench/tests/features/issue_110_setup_run_test.py b/rebench/tests/features/issue_110_setup_run_test.py
new file mode 100644
index 00000000..3d7994e7
--- /dev/null
+++ b/rebench/tests/features/issue_110_setup_run_test.py
@@ -0,0 +1,151 @@
+# Copyright (c) 2019 Stefan Marr
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import os
+
+from ..rebench_test_case import ReBenchTestCase
+
+from ...configurator import Configurator, load_config
+from ...executor import Executor
+from ...persistence import DataStore
+from ...rebench import ReBench
+
+
+class Issue110Test(ReBenchTestCase):
+
+    def _cleanup_file(self, file_name):
+        if os.path.isfile(self._path + '/' + file_name):
+            os.remove(self._path + '/' + file_name)
+
+    def setUp(self):
+        super(Issue110Test, self).setUp()
+        self._set_path(__file__)
+        self._cleanup_file('build.log')
+        self._cleanup_file('rebench.data')
+        self._data_store = DataStore(self._ui)
+        self._cli_options = ReBench().shell_options().parse_args(['-d', '--setup-only', 'dummy'])
+
+    def tearDown(self):
+        self._cleanup_file('rebench.data')
+        self._cleanup_file('build.log')
+        self._cleanup_file('vm_110a.sh')
+        self._cleanup_file('vm_110b.sh')
+
+    def _read_log(self):
+        file_name = 'build.log'
+        file_path = self._path + '/' + file_name
+        if os.path.isfile(file_path):
+            with open(file_path, 'r') as log_file:
+                lines = log_file.read().strip().split("\n")
+                return set(lines)
+        return None
+
+    def _assert_runs(self, cnf, num_runs, num_dps, num_invocations):
+        runs = cnf.get_runs()
+        self.assertEqual(num_runs, len(runs), "incorrect number of runs")
+
+        for run in runs:
+            self.assertEqual(num_dps, run.get_number_of_data_points(),
+                             "incorrect num of data points")
+            self.assertEqual(num_invocations, run.completed_invocations,
+                             "incorrect num of invocations")
+
+    def _execute(self, cnf):
+        ex = Executor(cnf.get_runs(), False, True, self._ui, build_log=cnf.build_log)
+        ex.execute()
+
+    def test_complete(self):
+        cnf = Configurator(load_config(self._path + '/issue_110.conf'),
+                           self._data_store, self._ui, self._cli_options,
+                           exp_name='Complete', data_file=self._tmp_file)
+        self._data_store.load_data(None, False)
+
+        runs = cnf.get_runs()
+        # depending on the iteration order over the runs,
+        # we may keep 2 or 3 runs, since SuiteWithBuild
+        # uses one of the executors too
+        self.assertTrue(len(runs) == 3 or len(runs) == 2)
+        for run in runs:
+            self.assertEqual(0, run.get_number_of_data_points(),
+                             "incorrect num of data points")
+            self.assertEqual(0, run.completed_invocations,
+                             "incorrect num of invocations")
+
+        self._execute(cnf)
+
+        runs = cnf.get_runs()
+        self.assertTrue(len(runs) == 3 or len(runs) == 2)
+
+        for run in runs:
+            self.assertEqual(1, run.get_number_of_data_points(),
+                             "incorrect num of data points")
+            self.assertEqual(1, run.completed_invocations,
+                             "incorrect num of invocations")
+
+        log = self._read_log()
+        self.assertEqual({"E:BashB|STD:Built VM110B",
+                          "E:BashA|STD:Built VM110A",
+                          "S:SuiteWithBuild|STD:Built Suite"}, log)
+
+    def test_a1(self):
+        cnf = Configurator(load_config(self._path + '/issue_110.conf'),
+                           self._data_store, self._ui, self._cli_options,
+                           exp_name='A1', data_file=self._tmp_file)
+        self._data_store.load_data(None, False)
+
+        self._assert_runs(cnf, 1, 0, 0)
+
+        self._execute(cnf)
+
+        self._assert_runs(cnf, 1, 1, 1)
+
+        log = self._read_log()
+        self.assertEqual({"E:BashA|STD:Built VM110A"}, log)
+
+    def test_b2(self):
+        cnf = Configurator(load_config(self._path + '/issue_110.conf'),
+                           self._data_store, self._ui, self._cli_options,
+                           exp_name='B2', data_file=self._tmp_file)
+        self._data_store.load_data(None, False)
+
+        self._assert_runs(cnf, 1, 0, 0)
+
+        self._execute(cnf)
+
+        self._assert_runs(cnf, 1, 1, 1)
+
+        log = self._read_log()
+        self.assertEqual({"E:BashB|STD:Built VM110B"}, log)
+
+    def test_suite_with_build(self):
+        cnf = Configurator(load_config(self._path + '/issue_110.conf'),
+                           self._data_store, self._ui, self._cli_options,
+                           exp_name='SuiteWithBuild', data_file=self._tmp_file)
+        self._data_store.load_data(None, False)
+
+        # Has not executed yet; check that there are no data points or invocations
+        self._assert_runs(cnf, 1, 0, 0)
+
+        self._execute(cnf)
+
+        self._assert_runs(cnf, 1, 1, 1)
+
+        log = self._read_log()
+        self.assertEqual({"E:BashA|STD:Built VM110A",
+                          "S:SuiteWithBuild|STD:Built Suite"}, log)
diff --git a/rebench/tests/features/issue_58_build_vm_test.py b/rebench/tests/features/issue_58_build_vm_test.py
index 72e498cb..2734ef51 100644
--- a/rebench/tests/features/issue_58_build_vm_test.py
+++ b/rebench/tests/features/issue_58_build_vm_test.py
@@ -31,6 +31,10 @@ class Issue58BuildExecutor(ReBenchTestCase):
     def setUp(self):
         super(Issue58BuildExecutor, self).setUp()
         self._set_path(__file__)
+        self._cleanup_log()
+
+    def tearDown(self):
+        self._cleanup_log()
 
     def _cleanup_log(self):
         if os.path.isfile(self._path + '/build.log'):
@@ -41,7 +45,6 @@ def _read_log(self):
             return log_file.read()
 
     def test_build_executor_simple_cmd(self):
-        self._cleanup_log()
         cnf = Configurator(load_config(self._path + '/issue_58.conf'), DataStore(self._ui),
                            self._ui, data_file=self._tmp_file, exp_name='A')
         runs = list(cnf.get_runs())
@@ -58,8 +61,6 @@ def test_build_executor_simple_cmd(self):
         os.remove(self._path + '/vm_58a.sh')
 
     def test_build_executor_cmd_list(self):
-        self._cleanup_log()
-
         cnf = Configurator(load_config(self._path + '/issue_58.conf'), DataStore(self._ui),
                            self._ui, data_file=self._tmp_file, exp_name='B')
         runs = list(cnf.get_runs())
@@ -76,8 +77,6 @@ def test_build_executor_cmd_list(self):
         os.remove(self._path + '/vm_58b.sh')
 
     def test_build_output_in_log(self):
-        self._cleanup_log()
-
         self.test_build_executor_simple_cmd()
 
         log = self._read_log()
@@ -86,8 +85,6 @@ def test_build_output_in_log(self):
                          "E:BashA|STD:standard\nE:BashA|ERR:error\n", log)
 
     def test_broken_build_prevents_experiments(self):
-        self._cleanup_log()
-
         cnf = Configurator(load_config(self._path + '/issue_58.conf'), DataStore(self._ui),
                            self._ui, data_file=self._tmp_file, exp_name='C')
@@ -110,7 +107,6 @@ def test_broken_build_prevents_experiments(self):
                          "E:BashC|STD:standard\nE:BashC|ERR:error\n", log)
 
     def test_build_is_run_only_once_for_same_command(self):
-        self._cleanup_log()
         cnf = Configurator(load_config(self._path + '/issue_58.conf'), DataStore(self._ui),
                            self._ui, data_file=self._tmp_file, exp_name='AandAA')
diff --git a/rebench/tests/rebench_test_case.py b/rebench/tests/rebench_test_case.py
index 98ac5e53..fbc9f307 100644
--- a/rebench/tests/rebench_test_case.py
+++ b/rebench/tests/rebench_test_case.py
@@ -55,8 +55,8 @@ def _assert_runs(self, cnf, num_runs, num_dps, num_invocations):
         :return:
         """
         runs = cnf.get_runs()
-        self.assertEqual(num_runs, len(runs))
+        self.assertEqual(num_runs, len(runs), "incorrect number of runs")
 
         run = list(runs)[0]
-        self.assertEqual(num_dps, run.get_number_of_data_points())
-        self.assertEqual(num_invocations, run.completed_invocations)
+        self.assertEqual(num_dps, run.get_number_of_data_points(), "incorrect num of data points")
+        self.assertEqual(num_invocations, run.completed_invocations, "incorrect num of invocations")
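
Note for reviewers: the core of the `--setup-only` change is the run filtering in `Configurator.get_runs`, which uses the set-superset check `build_commands >= commands` to keep a run only if it contributes at least one build command not yet covered by a previously kept run. Below is a minimal standalone sketch of that logic; the run names and command strings are made up for illustration, and only the idea that each run exposes its build commands as a set (per `RunId.build_commands()`) comes from this patch.

```python
# Sketch of the --setup-only run filtering in Configurator.get_runs.
# Each entry maps an illustrative run name to its set of build commands,
# mirroring what RunId.build_commands() returns for a real run.
runs = {
    "BashA:Suite1": {"build vm_a"},
    "BashA:SuiteWithBuild": {"build vm_a", "build suite"},
    "BashB:Suite2": {"build vm_b"},
}

kept = set()           # runs that must still execute to trigger a build
seen_commands = set()  # build commands already covered by a kept run

for name, commands in runs.items():
    # `seen_commands >= commands` is the superset test: it is True when
    # every build command of this run is already covered, in which case
    # the run adds nothing new and is dropped.
    if not seen_commands >= commands:
        kept.add(name)
        seen_commands.update(commands)

print(sorted(kept))  # with this ordering, all three runs are kept
```

Because the real code iterates over a `set` of runs, the visiting order is arbitrary: if `BashA:SuiteWithBuild` is seen before `BashA:Suite1`, the latter is dropped since its single build command is already covered. This is why `test_complete` accepts either 2 or 3 remaining runs. Note also that a run without any build commands has an empty command set, which is always a subset of `seen_commands`, so such runs are filtered out under `--setup-only`. In day-to-day use, `rebench --setup-only rebench.conf` then exercises every build once with a single smoke-test benchmark per executor, while `rebench -B rebench.conf` skips build commands entirely.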