
Merge pull request #2746 from ekouts/feat/dry-run
[feat] Introduce a `--dry-run` option
vkarak committed Feb 22, 2023
2 parents 088207c + 4210494 commit 9bf69b7
Showing 10 changed files with 227 additions and 100 deletions.
14 changes: 14 additions & 0 deletions docs/manpage.rst
@@ -232,6 +232,20 @@ An action must always be specified.
.. versionadded:: 3.10.0


.. option:: --dry-run

Dry run the selected tests.

The dry-run mode tries to execute as much of the test pipeline as possible.
More specifically, the tests will neither be submitted nor run for real,
but their stage directory will be prepared and the corresponding job script will be emitted.
Similarly, the sanity and performance functions will not be evaluated, although all the preparation for their evaluation will still happen.
Tests run in dry-run mode will not fail, unless there is a programming error in the test or the test tries to use a resource that is not produced in dry-run mode, e.g., it accesses the standard output or a resource produced by a dependency outside of any sanity or performance function.
In this case, users can call the :func:`~reframe.core.pipeline.RegressionTest.is_dry_run` method in their test and take a different action if the test is run in dry-run mode.

.. versionadded:: 4.1
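
For example, a post-run hook that inspects the generated output can guard any file access as in the following sketch (the test is hypothetical and only illustrates the intended use of :func:`~reframe.core.pipeline.RegressionTest.is_dry_run`):

.. code-block:: python

   import os
   import reframe as rfm
   import reframe.utility.sanity as sn


   @rfm.simple_test
   class dry_run_aware_test(rfm.RunOnlyRegressionTest):
       valid_systems = ['*']
       valid_prog_environs = ['*']
       executable = 'echo'
       executable_opts = ['hello']

       @run_after('run')
       def report_output_size(self):
           # No job is submitted in dry-run mode, so the standard
           # output file does not exist; guard direct accesses to it.
           if self.is_dry_run():
               return

           print(os.path.getsize(os.path.join(self.stagedir, self.stdout)))

       @sanity_function
       def validate(self):
           return sn.assert_found(r'hello', self.stdout)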


.. option:: -L, --list-detailed[=T|C]

List selected tests providing more details for each test.
31 changes: 30 additions & 1 deletion reframe/core/decorators.py
@@ -16,6 +16,7 @@

import reframe.utility.osext as osext
from reframe.core.exceptions import ReframeSyntaxError, SkipTestError, what
from reframe.core.fields import make_convertible
from reframe.core.fixtures import FixtureRegistry
from reframe.core.logging import getlogger, time_function
from reframe.core.pipeline import RegressionTest
@@ -25,6 +26,28 @@
# NOTE: we should consider renaming this module in 4.0; it practically takes
# care of the registration and instantiation of the tests.

def _setvars(registry, variables):
unset_vars = {}
for test in registry:
for name, val in variables.items():
if '.' in name:
testname, varname = name.split('.', maxsplit=1)
else:
testname, varname = test.__name__, name

if testname == test.__name__:
# Treat special values
if val == '@none':
val = None
else:
val = make_convertible(val)

if not test.setvar(varname, val):
unset_vars.setdefault(test.__name__, [])
unset_vars[test.__name__].append(varname)

return unset_vars
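

For illustration, this helper is what services external variable overrides such as the `-S`/`--setvar` command-line option; a rough sketch of its behaviour follows (the registry, test and variable names are hypothetical):

# Variables as parsed from, e.g., `-S MyTest.num_tasks=4 -S descr=@none`
variables = {'MyTest.num_tasks': '4', 'descr': '@none'}

# 'MyTest.num_tasks' targets only MyTest, whereas the unqualified 'descr'
# applies to every test in the registry; the special value '@none' is
# translated to None. Variables that a test does not define are reported
# back keyed on the test name.
unset = _setvars(registry, variables)
if unset:
    print('could not set variables:', unset)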


class TestRegistry:
'''Regression test registry.
@@ -52,8 +75,11 @@ def add(self, test, *args, **kwargs):
self._tests.setdefault(test, [])
self._tests[test].append((args, kwargs))

def setvars(self, variables):
return _setvars(self, variables)

@time_function
def instantiate_all(self, reset_sysenv=0):
def instantiate_all(self, reset_sysenv=0, external_vars=None):
'''Instantiate all the registered tests.
:param reset_sysenv: Reset valid_systems and valid_prog_environs after
@@ -105,6 +131,9 @@ def instantiate_all(self, reset_sysenv=0):

# Instantiate the new fixtures and update the registry
new_fixtures = tmp_registry.difference(fixture_registry)
if external_vars:
_setvars(new_fixtures.uninst_tests(), external_vars)

leaf_tests = new_fixtures.instantiate_all()
fixture_registry.update(new_fixtures)

5 changes: 4 additions & 1 deletion reframe/core/fixtures.py
@@ -304,7 +304,6 @@ def instantiate_all(self):
'valid_systems': part,
**variables
}

try:
# Instantiate the fixture
inst = cls(variant_num=varnum, fixt_name=name,
@@ -322,6 +321,10 @@ def instantiate_all(self):

return ret

def uninst_tests(self):
'''Get the uninstantiated tests of this registry'''
return self._registry.keys()

def _filter_valid_partitions(self, candidate_parts):
return [p for p in candidate_parts if p in self._env_by_part]

41 changes: 35 additions & 6 deletions reframe/core/pipeline.py
@@ -963,6 +963,11 @@ def pipeline_hooks(cls):
#: :default: :class:`True`
build_locally = variable(typ.Bool, value=True, loggable=True)

# Special variables

#: Dry-run mode
_rfm_dry_run = variable(typ.Bool, value=False)

def __new__(cls, *args, **kwargs):
obj = super().__new__(cls)

@@ -1514,6 +1519,14 @@ def is_fixture(self):
'''Check if the test is a fixture.'''
return getattr(self, '_rfm_is_fixture', False)

def is_dry_run(self):
'''Check if the test runs in dry-run mode.
.. versionadded:: 4.1
'''
return self._rfm_dry_run

def _resolve_fixtures(self):
'''Resolve the fixture dependencies and inject the fixture handle.
@@ -1814,7 +1827,8 @@ def compile(self):
except OSError as e:
raise PipelineError('failed to prepare build job') from e

self._build_job.submit()
if not self.is_dry_run():
self._build_job.submit()

@final
def compile_wait(self):
@@ -1836,6 +1850,9 @@ def compile_wait(self):
more details.
'''
if self.is_dry_run():
return

self._build_job.wait()

# We raise a BuildError when we have an exit code and it is non-zero
@@ -1955,9 +1972,9 @@ def _get_cp_env():
except OSError as e:
raise PipelineError('failed to prepare run job') from e

self._job.submit()

self.logger.debug(f'Spawned run job (id={self.job.jobid})')
if not self.is_dry_run():
self._job.submit()
self.logger.debug(f'Spawned run job (id={self.job.jobid})')

# Update num_tasks if test is flexible
if self.job.sched_flex_alloc_nodes:
@@ -1982,7 +1999,7 @@ def compile_complete(self):
:raises reframe.core.exceptions.ReframeError: In case of errors.
'''
if not self._build_job:
if not self._build_job or self.is_dry_run():
return True

return self._build_job.finished()
@@ -2011,7 +2028,7 @@ def run_complete(self):
more details.
'''
if not self._job:
if not self._job or self.is_dry_run():
return True

return self._job.finished()
@@ -2034,6 +2051,9 @@ def run_wait(self):
more details.
'''
if self.is_dry_run():
return

self._job.wait()

@final
@@ -2095,6 +2115,9 @@ def check_sanity(self):
elif not hasattr(self, 'sanity_patterns'):
raise SanityError('sanity_patterns not set')

if self.is_dry_run():
return

with osext.change_dir(self._stagedir):
success = sn.evaluate(self.sanity_patterns)
if not success:
@@ -2151,6 +2174,9 @@ def check_performance(self):
self.perf_variables[var] = sn.make_performance_function(expr,
unit)

if self.is_dry_run():
return

# Evaluate the performance function and retrieve the metrics
with osext.change_dir(self._stagedir):
for tag, expr in self.perf_variables.items():
@@ -2272,6 +2298,9 @@ def cleanup(self, remove_files=False):
more details.
'''
if self.is_dry_run():
return

aliased = os.path.samefile(self._stagedir, self._outputdir)
if aliased:
self.logger.debug(
11 changes: 10 additions & 1 deletion reframe/frontend/cli.py
@@ -380,6 +380,10 @@ def main():
'-r', '--run', action='store_true',
help='Run the selected checks'
)
action_options.add_argument(
'--dry-run', action='store_true',
help='Dry run the tests without submitting them for execution'
)
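
For context, a typical invocation of the new action would look as follows (the configuration file and test paths are hypothetical):

reframe -C config.py -c checks/mytest.py -n MyTest --dry-run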

# Run options
run_options.add_argument(
@@ -896,6 +900,9 @@ def restrict_logging():
else:
external_vars[lhs] = rhs

if options.dry_run:
external_vars['_rfm_dry_run'] = '1'

loader = RegressionCheckLoader(check_search_path,
check_search_recursive,
external_vars,
@@ -1166,10 +1173,11 @@ def _sort_testcases(testcases):
)
sys.exit(0)

if not options.run:
if not options.run and not options.dry_run:
printer.error("No action option specified. Available options:\n"
" - `-l'/`-L' for listing\n"
" - `-r' for running\n"
" - `--dry-run' for dry running\n"
" - `--list-tags' for listing unique test tags\n"
" - `--ci-generate' for generating a CI pipeline\n"
f"Try `{argparser.prog} -h' for more options.")
@@ -1266,6 +1274,7 @@ def module_unuse(*paths):
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
exec_policy.dry_run_mode = options.dry_run
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
1 change: 1 addition & 0 deletions reframe/frontend/executors/__init__.py
@@ -587,6 +587,7 @@ def __init__(self):
self.skip_sanity_check = False
self.skip_performance_check = False
self.keep_stage_files = False
self.dry_run_mode = False
self.only_environs = None
self.printer = None

21 changes: 17 additions & 4 deletions reframe/frontend/executors/policies.py
@@ -98,9 +98,13 @@ def __init__(self):

def runcase(self, case):
super().runcase(case)
check, partition, environ = case
check, partition, _ = case
task = RegressionTask(case, self.task_listeners)
self.printer.status('RUN', task.info())
if check.is_dry_run():
self.printer.status('DRY', task.info())
else:
self.printer.status('RUN', task.info())

self._task_index[case] = task
self.stats.add_task(task)
try:
@@ -138,7 +142,9 @@ def runcase(self, case):

self._pollctl.reset_snooze_time()
while True:
sched.poll(task.check.job)
if not self.dry_run_mode:
sched.poll(task.check.job)

if task.run_complete():
break

@@ -362,6 +368,9 @@ def exit(self):
self._dump_pipeline_progress('pipeline-progress.json')

def _poll_tasks(self):
if self.dry_run_mode:
return

for partname, sched in self._schedulers.items():
jobs = []
for t in self._partition_tasks[partname]:
@@ -431,7 +440,11 @@ def _advance_startup(self, task):
return 1
elif self.deps_succeeded(task):
try:
self.printer.status('RUN', task.info())
if task.check.is_dry_run():
self.printer.status('DRY', task.info())
else:
self.printer.status('RUN', task.info())

task.setup(task.testcase.partition,
task.testcase.environ,
sched_flex_alloc_nodes=self.sched_flex_alloc_nodes,
31 changes: 5 additions & 26 deletions reframe/frontend/loader.py
@@ -14,7 +14,6 @@
import sys
import traceback

import reframe.core.fields as fields
import reframe.utility as util
import reframe.utility.osext as osext
from reframe.core.exceptions import NameConflictError, is_severe, what
@@ -133,29 +132,6 @@ def prefix(self):
def recurse(self):
return self._recurse

def _set_defaults(self, test_registry):
if test_registry is None:
return

self._unset_vars = {}
for test in test_registry:
for name, val in self._external_vars.items():
if '.' in name:
testname, varname = name.split('.', maxsplit=1)
else:
testname, varname = test.__name__, name

if testname == test.__name__:
# Treat special values
if val == '@none':
val = None
else:
val = fields.make_convertible(val)

if not test.setvar(varname, val):
self._unset_vars.setdefault(test.__name__, [])
self._unset_vars[test.__name__].append(varname)

def load_from_module(self, module):
'''Load user checks from module.
@@ -164,10 +140,13 @@ def load_from_module(self, module):
are validated before return.
'''
registry = getattr(module, '_rfm_test_registry', None)
self._set_defaults(registry)
if registry:
self._unset_vars.update(registry.setvars(self._external_vars))

reset_sysenv = self._skip_prgenv_check << 1 | self._skip_system_check
if registry:
candidate_tests = registry.instantiate_all(reset_sysenv)
candidate_tests = registry.instantiate_all(reset_sysenv,
self._external_vars)
else:
candidate_tests = []

6 changes: 3 additions & 3 deletions reframe/frontend/printer.py
@@ -48,15 +48,15 @@ def status(self, status, message='', just=None, level=logging.INFO):

status_stripped = status.strip()
if self.colorize:
if status_stripped == 'SKIP':
if status_stripped in ('DRY', 'SKIP'):
status = color.colorize(status, color.YELLOW)
elif status_stripped in ['FAIL', 'FAILED', 'ERROR']:
elif status_stripped in ('FAIL', 'FAILED', 'ERROR'):
status = color.colorize(status, color.RED)
else:
status = color.colorize(status, color.GREEN)

final_msg = f'[ {status} ] '
if status_stripped in ['OK', 'SKIP', 'FAIL']:
if status_stripped in ('OK', 'SKIP', 'FAIL'):
self._progress_count += 1
width = len(str(self._progress_total))
padded_progress = str(self._progress_count).rjust(width)
