diff --git a/docs/regression_test_api.rst b/docs/regression_test_api.rst index fb406a100f..63d870244e 100644 --- a/docs/regression_test_api.rst +++ b/docs/regression_test_api.rst @@ -29,6 +29,23 @@ Regression Test Class Decorators Pipeline Hooks -------------- +.. versionadded:: 2.20 + + +Pipeline hooks are an easy way to perform operations while the test traverses the execution pipeline. +You can attach arbitrary functions to run before or after any pipeline stage; such functions are called *pipeline hooks*. +Multiple hooks can be attached before or after the same pipeline stage, in which case the order of execution will match the order in which the functions are defined in the class body of the test. +A single hook can also be applied to multiple stages and it will be executed multiple times. +All pipeline hooks of a test class are inherited by its subclasses. +Subclasses may override a pipeline hook of their parents by redefining the hook function and re-attaching it at the same pipeline stage. +There are seven pipeline stages where you can attach test methods: ``init``, ``setup``, ``compile``, ``run``, ``sanity``, ``performance`` and ``cleanup``. +The ``init`` stage is not a real pipeline stage, but it refers to the test initialization. + +Hooks attached to any stage will run exactly before or after this stage executes. +So although a "post-init" and a "pre-setup" hook will both run *after* a test has been initialized and *before* the test goes through the first pipeline stage, they will execute at different times: +the post-init hook will execute *right after* the test is initialized. +The framework will then continue with other activities and it will execute the pre-setup hook *just before* it schedules the test for executing its setup stage. + .. autodecorator:: reframe.core.decorators.run_after(stage) .. 
autodecorator:: reframe.core.decorators.run_before(stage) diff --git a/reframe/core/decorators.py b/reframe/core/decorators.py index 7f1f26f14c..f7a4085b9d 100644 --- a/reframe/core/decorators.py +++ b/reframe/core/decorators.py @@ -214,6 +214,13 @@ def _fn(*args, **kwargs): return deco +# Valid pipeline stages that users can specify in the `run_before()` and +# `run_after()` decorators +_USER_PIPELINE_STAGES = ( + 'init', 'setup', 'compile', 'run', 'sanity', 'performance', 'cleanup' +) + + def run_before(stage): '''Decorator for attaching a test method to a pipeline stage. @@ -226,19 +233,57 @@ def run_before(stage): The ``stage`` argument can be any of ``'setup'``, ``'compile'``, ``'run'``, ``'sanity'``, ``'performance'`` or ``'cleanup'``. - .. versionadded:: 2.20 ''' + if stage not in _USER_PIPELINE_STAGES: + raise ValueError(f'invalid pipeline stage specified: {stage!r}') + + if stage == 'init': + raise ValueError('pre-init hooks are not allowed') + return _runx('pre_' + stage) def run_after(stage): '''Decorator for attaching a test method to a pipeline stage. - This is completely analogous to the - :py:attr:`reframe.core.decorators.run_before`. + This is analogous to the :py:attr:`~reframe.core.decorators.run_before`, + except that ``'init'`` can also be used as the ``stage`` argument. In this + case, the hook will execute right after the test is initialized (i.e. + after the :func:`__init__` method is called), before entering the test's + pipeline. In essence, a post-init hook is equivalent to defining + additional :func:`__init__` functions in the test. All the other + properties of pipeline hooks apply equally here. The following code + + .. code-block:: python + + @rfm.run_after('init') + def foo(self): + self.x = 1 + + + is equivalent to + + .. code-block:: python + + def __init__(self): + self.x = 1 + + .. versionchanged:: 3.5.2 + Add the ability to define post-init hooks in tests. - .. 
versionadded:: 2.20 ''' + + if stage not in _USER_PIPELINE_STAGES: + raise ValueError(f'invalid pipeline stage specified: {stage!r}') + + # Map user stage names to the actual pipeline functions if needed + if stage == 'init': + stage = '__init__' + elif stage == 'compile': + stage = 'compile_wait' + elif stage == 'run': + stage = 'run_wait' + return _runx('post_' + stage) diff --git a/reframe/core/hooks.py b/reframe/core/hooks.py new file mode 100644 index 0000000000..ccca205486 --- /dev/null +++ b/reframe/core/hooks.py @@ -0,0 +1,135 @@ +# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich) +# ReFrame Project Developers. See the top-level LICENSE file for details. +# +# SPDX-License-Identifier: BSD-3-Clause + +import contextlib +import functools + +import reframe.utility as util + + +def attach_hooks(hooks): + '''Attach the hooks of a registry to a pipeline function. + + This function returns a decorator for pipeline functions that will run the + registered hooks before and after the function. + + Hooks are selected from ``hooks`` based on the name of the decorated + function: the ``pre_<name>`` hooks run before it and the ``post_<name>`` + hooks run after it; hooks disabled on the test instance are skipped. + ''' + + def _deco(func): + def select_hooks(obj, kind): + phase = kind + func.__name__ + if phase not in hooks: + return [] + + return [h for h in hooks[phase] + if h.__name__ not in obj._disabled_hooks] + + @functools.wraps(func) + def _fn(obj, *args, **kwargs): + for h in select_hooks(obj, 'pre_'): + h(obj) + + func(obj, *args, **kwargs) + for h in select_hooks(obj, 'post_'): + h(obj) + + return _fn + + return _deco + + +class Hook: + '''A pipeline hook. + + This is essentially a function wrapper that hashes the functions by name, + since we want hooks to be overridden by name in subclasses. 
+ ''' + + def __init__(self, fn): + self.__fn = fn + + def __getattr__(self, attr): + return getattr(self.__fn, attr) + + @property + def fn(self): + return self.__fn + + def __hash__(self): + return hash(self.__name__) + + def __eq__(self, other): + if not isinstance(other, type(self)): + return NotImplemented + + return self.__name__ == other.__name__ + + def __call__(self, *args, **kwargs): + return self.__fn(*args, **kwargs) + + def __repr__(self): + return repr(self.__fn) + + +class HookRegistry: + '''Global hook registry.''' + + @classmethod + def create(cls, namespace): + '''Create a hook registry from a class namespace. + + Hook functions have an `_rfm_attach` attribute that specify the stages + of the pipeline where they must be attached. Dependencies will be + resolved first in the post-setup phase if not assigned elsewhere. + ''' + + local_hooks = {} + fn_with_deps = [] + for v in namespace.values(): + if hasattr(v, '_rfm_attach'): + for phase in v._rfm_attach: + try: + local_hooks[phase].append(Hook(v)) + except KeyError: + local_hooks[phase] = [Hook(v)] + + with contextlib.suppress(AttributeError): + if v._rfm_resolve_deps: + fn_with_deps.append(Hook(v)) + + if fn_with_deps: + local_hooks['post_setup'] = ( + fn_with_deps + local_hooks.get('post_setup', []) + ) + + return cls(local_hooks) + + def __init__(self, hooks=None): + self.__hooks = {} + if hooks is not None: + self.update(hooks) + + def __getitem__(self, key): + return self.__hooks[key] + + def __setitem__(self, key, name): + self.__hooks[key] = name + + def __contains__(self, key): + return key in self.__hooks + + def __getattr__(self, name): + return getattr(self.__hooks, name) + + def update(self, hooks): + for phase, hks in hooks.items(): + self.__hooks.setdefault(phase, util.OrderedSet()) + for h in hks: + self.__hooks[phase].add(h) + + def __repr__(self): + return repr(self.__hooks) diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 422997050a..beba37d54f 100644 --- 
a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -8,11 +8,13 @@ # -from reframe.core.exceptions import ReframeSyntaxError import reframe.core.namespaces as namespaces import reframe.core.parameters as parameters import reframe.core.variables as variables +from reframe.core.exceptions import ReframeSyntaxError +from reframe.core.hooks import HookRegistry + class RegressionTestMeta(type): @@ -143,27 +145,12 @@ def __init__(cls, name, bases, namespace, **kwargs): # Set up the hooks for the pipeline stages based on the _rfm_attach # attribute; all dependencies will be resolved first in the post-setup # phase if not assigned elsewhere - hooks = {} - fn_with_deps = [] - for v in namespace.values(): - if hasattr(v, '_rfm_attach'): - for phase in v._rfm_attach: - try: - hooks[phase].append(v) - except KeyError: - hooks[phase] = [v] - - try: - if v._rfm_resolve_deps: - fn_with_deps.append(v) - except AttributeError: - pass - - if fn_with_deps: - hooks['post_setup'] = fn_with_deps + hooks.get('post_setup', []) + hooks = HookRegistry.create(namespace) + for b in bases: + if hasattr(b, '_rfm_pipeline_hooks'): + hooks.update(getattr(b, '_rfm_pipeline_hooks')) - cls._rfm_pipeline_hooks = hooks - cls._rfm_disabled_hooks = set() + cls._rfm_pipeline_hooks = hooks # HookRegistry(local_hooks) cls._final_methods = {v.__name__ for v in namespace.values() if hasattr(v, '_rfm_final')} diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index db0f66b349..f01bfe628d 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -23,6 +23,7 @@ import reframe.core.environments as env import reframe.core.fields as fields +import reframe.core.hooks as hooks import reframe.core.logging as logging import reframe.core.runtime as rt import reframe.utility as util @@ -77,52 +78,15 @@ DEPEND_FULLY = 3 -def _run_hooks(name=None): - def _deco(func): - def hooks(obj, kind): - if name is None: - hook_name = kind + func.__name__ - elif name is not None and name.startswith(kind): 
- hook_name = name - else: - # Just any name that does not exist - hook_name = 'xxx' - - func_names = set() - disabled_hooks = set() - func_list = [] - for cls in type(obj).mro(): - if hasattr(cls, '_rfm_disabled_hooks'): - disabled_hooks |= cls._rfm_disabled_hooks - - try: - funcs = cls._rfm_pipeline_hooks.get(hook_name, []) - if any(fn.__name__ in func_names for fn in funcs): - # hook has been overriden - continue - - func_names |= {fn.__name__ for fn in funcs} - func_list += funcs - except AttributeError: - pass - - # Remove the disabled hooks before returning - return [fn for fn in func_list - if fn.__name__ not in disabled_hooks] - - '''Run the hooks before and after func.''' - @functools.wraps(func) - def _fn(obj, *args, **kwargs): - for h in hooks(obj, 'pre_'): - h(obj) - - func(obj, *args, **kwargs) - for h in hooks(obj, 'post_'): - h(obj) - - return _fn - - return _deco +_PIPELINE_STAGES = ( + '__init__', + 'setup', + 'compile', 'compile_wait', + 'run', 'run_wait', + 'sanity', + 'performance', + 'cleanup' +) def final(fn): @@ -178,24 +142,22 @@ class RegressionTest(RegressionMixin, jsonext.JSONSerializable): ''' - @classmethod - def disable_hook(cls, hook_name): + def disable_hook(self, hook_name): '''Disable pipeline hook by name. :arg hook_name: The function name of the hook to be disabled. 
:meta private: ''' - cls._rfm_disabled_hooks.add(hook_name) + self._disabled_hooks.add(hook_name) @classmethod def pipeline_hooks(cls): ret = {} - for c in cls.mro(): - if hasattr(c, '_rfm_pipeline_hooks'): - for kind, hook in c._rfm_pipeline_hooks.items(): - ret.setdefault(kind, []) - ret[kind] += hook + for phase, hooks in cls._rfm_pipeline_hooks.items(): + ret[phase] = [] + for h in hooks: + ret[phase].append(h.fn) return ret @@ -804,7 +766,12 @@ def __new__(cls, *args, _rfm_use_params=False, **kwargs): os.path.dirname(inspect.getfile(cls)) ) - obj._rfm_init(name, prefix) + # Attach the hooks to the pipeline stages + for stage in _PIPELINE_STAGES: + cls._add_hooks(stage) + + # Initialize the test + obj.__rfm_init__(name, prefix) return obj def __init__(self): @@ -817,6 +784,19 @@ def _append_parameters_to_name(self): else: return '' + @classmethod + def _add_hooks(cls, stage): + pipeline_hooks = cls._rfm_pipeline_hooks + fn = getattr(cls, stage) + new_fn = hooks.attach_hooks(pipeline_hooks)(fn) + setattr(cls, '_rfm_pipeline_fn_' + stage, new_fn) + + def __getattribute__(self, name): + if name in _PIPELINE_STAGES: + name = f'_rfm_pipeline_fn_{name}' + + return super().__getattribute__(name) + @classmethod def __init_subclass__(cls, *, special=False, pin_prefix=False, **kwargs): super().__init_subclass__(**kwargs) @@ -829,7 +809,7 @@ def __init_subclass__(cls, *, special=False, pin_prefix=False, **kwargs): os.path.dirname(inspect.getfile(cls)) ) - def _rfm_init(self, name=None, prefix=None): + def __rfm_init__(self, name=None, prefix=None): if name is not None: self.name = name @@ -886,7 +866,11 @@ def _rfm_init(self, name=None, prefix=None): # Just an empty environment self._cdt_environ = env.Environment('__rfm_cdt_environ') + # Disabled hooks + self._disabled_hooks = set() + # Export read-only views to interesting fields + @property def current_environ(self): '''The programming environment that the regression test is currently @@ -1115,7 +1099,6 @@ def 
_setup_job(self, name, force_local=False, **job_opts): def _setup_perf_logging(self): self._perf_logger = logging.getperflogger(self) - @_run_hooks() @final def setup(self, partition, environ, **job_opts): '''The setup phase of the regression test pipeline. @@ -1165,7 +1148,6 @@ def _clone_to_stagedir(self, url): self.logger.debug(f'Cloning URL {url} into stage directory') osext.git_clone(self.sourcesdir, self._stagedir) - @_run_hooks('pre_compile') @final def compile(self): '''The compilation phase of the regression test pipeline. @@ -1272,7 +1254,6 @@ def compile(self): self._build_job.submit() - @_run_hooks('post_compile') @final def compile_wait(self): '''Wait for compilation phase to finish. @@ -1303,7 +1284,6 @@ def compile_wait(self): with osext.change_dir(self._stagedir): self.build_system.post_build(self._build_job) - @_run_hooks('pre_run') @final def run(self): '''The run phase of the regression test pipeline. @@ -1454,7 +1434,6 @@ def poll(self): 'please use run_complete() instead') return self.run_complete() - @_run_hooks('post_run') @final def run_wait(self): '''Wait for the run phase of this test to finish. @@ -1485,12 +1464,10 @@ def wait(self): 'please use run_wait() instead') self.run_wait() - @_run_hooks() @final def sanity(self): self.check_sanity() - @_run_hooks() @final def performance(self): try: @@ -1658,7 +1635,6 @@ def _copy_to_outputdir(self): else: shutil.copy2(f, self.outputdir) - @_run_hooks() @final def cleanup(self, remove_files=False): '''The cleanup phase of the regression test pipeline. @@ -1892,7 +1868,6 @@ class RunOnlyRegressionTest(RegressionTest, special=True): module. ''' - @_run_hooks() def setup(self, partition, environ, **job_opts): '''The setup stage of the regression test pipeline. @@ -1918,7 +1893,6 @@ def compile_wait(self): This is a no-op for this type of test. ''' - @_run_hooks('pre_run') def run(self): '''The run phase of the regression test pipeline. 
@@ -1948,7 +1922,6 @@ class CompileOnlyRegressionTest(RegressionTest, special=True): module. ''' - @_run_hooks() def setup(self, partition, environ, **job_opts): '''The setup stage of the regression test pipeline. diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index c6a14459bd..e107ec5c67 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -796,7 +796,7 @@ def _case_failed(t): # Disable hooks for tc in testcases: for h in options.hooks: - type(tc.check).disable_hook(h) + tc.check.disable_hook(h) # Act on checks if options.list or options.list_detailed: diff --git a/reframe/utility/__init__.py b/reframe/utility/__init__.py index 46a96c77b6..0d6f7d615e 100644 --- a/reframe/utility/__init__.py +++ b/reframe/utility/__init__.py @@ -883,7 +883,7 @@ def __repr__(self): if not vals: return type(self).__name__ + '()' else: - return '{' + ', '.join(repr(v) for v in vals) + '}' + return '{' + ', '.join(builtins.repr(v) for v in vals) + '}' # Container i/face def __contains__(self, item): diff --git a/unittests/test_ci.py b/unittests/test_ci.py index a3f221d24f..29810c4138 100644 --- a/unittests/test_ci.py +++ b/unittests/test_ci.py @@ -6,6 +6,7 @@ import io import jsonschema +import pytest import requests import yaml @@ -29,8 +30,12 @@ def test_ci_gitlab_pipeline(): pipeline = fp.getvalue() # Fetch the latest Gitlab CI JSON schema - response = requests.get('https://json.schemastore.org/gitlab-ci') - assert response.ok + try: + response = requests.get('https://json.schemastore.org/gitlab-ci') + except requests.exceptions.ConnectionError as e: + pytest.skip(f'could not reach URL: {e}') + else: + assert response.ok schema = response.json() jsonschema.validate(yaml.safe_load(pipeline), schema) diff --git a/unittests/test_cli.py b/unittests/test_cli.py index fff6223531..5c54b005e0 100644 --- a/unittests/test_cli.py +++ b/unittests/test_cli.py @@ -108,10 +108,11 @@ def _run_reframe(system='generic:default', @pytest.fixture def 
temp_runtime(tmp_path): - def _temp_runtime(site_config, system=None, options={}): + def _temp_runtime(site_config, system=None, options=None): + options = options or {} options.update({'systems/prefix': tmp_path}) with rt.temp_runtime(site_config, system, options): - yield rt.runtime + yield yield _temp_runtime diff --git a/unittests/test_dependencies.py b/unittests/test_dependencies.py index e1db9c54e4..53784f253f 100644 --- a/unittests/test_dependencies.py +++ b/unittests/test_dependencies.py @@ -85,10 +85,11 @@ def find_case(cname, ename, partname, cases): @pytest.fixture def temp_runtime(tmp_path): - def _temp_runtime(site_config, system=None, options={}): + def _temp_runtime(site_config, system=None, options=None): + options = options or {} options.update({'systems/prefix': tmp_path}) with rt.temp_runtime(site_config, system, options): - yield rt.runtime + yield yield _temp_runtime diff --git a/unittests/test_logging.py b/unittests/test_logging.py index e47e06e799..bb59f65f5d 100644 --- a/unittests/test_logging.py +++ b/unittests/test_logging.py @@ -226,7 +226,7 @@ def _temp_runtime(logging_config): fp.write(f'site_configuration = {util.ppretty(site_config)}') with rt.temp_runtime(fp.name): - yield rt.runtime() + yield return _temp_runtime diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index d15bcf8f46..d74e80dadf 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -56,10 +56,11 @@ def pinnedtest(): @pytest.fixture def temp_runtime(tmp_path): - def _temp_runtime(config_file, system=None, options={}): + def _temp_runtime(config_file, system=None, options=None): + options = options or {} options.update({'systems/prefix': str(tmp_path)}) with rt.temp_runtime(config_file, system, options): - yield rt.runtime() + yield yield _temp_runtime @@ -525,6 +526,57 @@ def set_resources(self): assert expected_job_options == set(test.job.options) +def test_unkown_pre_hook(): + with pytest.raises(ValueError): + class 
MyTest(rfm.RunOnlyRegressionTest): + @rfm.run_before('foo') + def prepare(self): + self.x = 1 + + +def test_unkown_post_hook(): + with pytest.raises(ValueError): + class MyTest(rfm.RunOnlyRegressionTest): + @rfm.run_after('foo') + def prepare(self): + self.x = 1 + + +def test_pre_init_hook(): + with pytest.raises(ValueError): + class MyTest(rfm.RunOnlyRegressionTest): + @rfm.run_before('init') + def prepare(self): + self.x = 1 + + +def test_post_init_hook(local_exec_ctx): + class _T0(rfm.RunOnlyRegressionTest): + x = variable(str, value='y') + y = variable(str, value='x') + + def __init__(self): + self.x = 'x' + + @rfm.run_after('init') + def prepare(self): + self.y += 'y' + + class _T1(_T0): + def __init__(self): + super().__init__() + self.z = 'z' + + t0 = _T0() + assert t0.x == 'x' + assert t0.y == 'xy' + + t1 = _T1() + assert t1.x == 'x' + assert t1.y == 'xy' + assert t1.z == 'z' + + def test_setup_hooks(HelloTest, local_exec_ctx): @fixtures.custom_prefix('unittests/resources/checks') class MyTest(HelloTest): @@ -684,6 +736,35 @@ class MyTest(DerivedTest): } +def test_inherited_hooks_from_instantiated_tests(HelloTest, local_exec_ctx): + @fixtures.custom_prefix('unittests/resources/checks') + class T0(HelloTest): + def __init__(self): + super().__init__() + self.name = type(self).__name__ + self.executable = os.path.join('.', self.name) + self.var = 0 + + @rfm.run_after('setup') + def x(self): + self.var += 1 + + class T1(T0): + @rfm.run_before('run') + def y(self): + self.foo = 1 + + t0 = T0() + t1 = T1() + print('==> running t0') + _run(t0, *local_exec_ctx) + print('==> running t1') + _run(t1, *local_exec_ctx) + assert t0.var == 1 + assert t1.var == 1 + assert t1.foo == 1 + + def test_overriden_hooks(HelloTest, local_exec_ctx): @fixtures.custom_prefix('unittests/resources/checks') class BaseTest(HelloTest): @@ -742,7 +823,7 @@ def x(self): self.var += 5 test = MyTest() - MyTest.disable_hook('y') + test.disable_hook('y') _run(test, *local_exec_ctx) assert 
test.var == 5 assert test.foo == 0 diff --git a/unittests/test_policies.py b/unittests/test_policies.py index f6b3fe3862..6d06f738ce 100644 --- a/unittests/test_policies.py +++ b/unittests/test_policies.py @@ -63,10 +63,11 @@ def timestamps(self): @pytest.fixture def temp_runtime(tmp_path): - def _temp_runtime(site_config, system=None, options={}): + def _temp_runtime(site_config, system=None, options=None): + options = options or {} options.update({'systems/prefix': str(tmp_path)}) with rt.temp_runtime(site_config, system, options): - yield rt.runtime + yield yield _temp_runtime @@ -553,11 +554,18 @@ def on_task_setup(self, task): @pytest.fixture def make_async_exec_ctx(temp_runtime): + tmprt = None + def _make_async_exec_ctx(max_jobs): - yield from temp_runtime(fixtures.TEST_CONFIG_FILE, 'generic', - {'systems/partitions/max_jobs': max_jobs}) + nonlocal tmprt + tmprt = temp_runtime(fixtures.TEST_CONFIG_FILE, 'generic', + {'systems/partitions/max_jobs': max_jobs}) + next(tmprt) + + yield _make_async_exec_ctx - return _make_async_exec_ctx + with contextlib.suppress(StopIteration): + next(tmprt) @pytest.fixture @@ -591,8 +599,7 @@ def test_concurrency_unlimited(async_runner, make_cases, make_async_exec_ctx): num_checks = 3 # Trigger evaluation of the execution context - ctx = make_async_exec_ctx(num_checks) - next(ctx) + make_async_exec_ctx(max_jobs=num_checks) runner, monitor = async_runner runner.runall(make_cases([SleepCheck(.5) for i in range(num_checks)])) @@ -619,8 +626,7 @@ def test_concurrency_unlimited(async_runner, make_cases, make_async_exec_ctx): def test_concurrency_limited(async_runner, make_cases, make_async_exec_ctx): # The number of checks must be <= 2*max_jobs. 
num_checks, max_jobs = 5, 3 - ctx = make_async_exec_ctx(max_jobs) - next(ctx) + make_async_exec_ctx(max_jobs) runner, monitor = async_runner runner.runall(make_cases([SleepCheck(.5) for i in range(num_checks)])) @@ -661,8 +667,7 @@ def test_concurrency_limited(async_runner, make_cases, make_async_exec_ctx): def test_concurrency_none(async_runner, make_cases, make_async_exec_ctx): num_checks = 3 - ctx = make_async_exec_ctx(1) - next(ctx) + make_async_exec_ctx(max_jobs=1) runner, monitor = async_runner runner.runall(make_cases([SleepCheck(.5) for i in range(num_checks)])) @@ -702,9 +707,7 @@ def assert_interrupted_run(runner): def test_kbd_interrupt_in_wait_with_concurrency(async_runner, make_cases, make_async_exec_ctx): - ctx = make_async_exec_ctx(4) - next(ctx) - + make_async_exec_ctx(max_jobs=4) runner, _ = async_runner with pytest.raises(KeyboardInterrupt): runner.runall(make_cases([ @@ -722,9 +725,7 @@ def test_kbd_interrupt_in_wait_with_limited_concurrency( # KeyboardInterruptCheck to finish first (the corresponding wait should # trigger the failure), so as to make the framework kill the remaining # three. 
- ctx = make_async_exec_ctx(2) - next(ctx) - + make_async_exec_ctx(max_jobs=2) runner, _ = async_runner with pytest.raises(KeyboardInterrupt): runner.runall(make_cases([ @@ -737,9 +738,7 @@ def test_kbd_interrupt_in_wait_with_limited_concurrency( def test_kbd_interrupt_in_setup_with_concurrency(async_runner, make_cases, make_async_exec_ctx): - ctx = make_async_exec_ctx(4) - next(ctx) - + make_async_exec_ctx(max_jobs=4) runner, _ = async_runner with pytest.raises(KeyboardInterrupt): runner.runall(make_cases([ @@ -752,9 +751,7 @@ def test_kbd_interrupt_in_setup_with_concurrency(async_runner, make_cases, def test_kbd_interrupt_in_setup_with_limited_concurrency( async_runner, make_cases, make_async_exec_ctx): - ctx = make_async_exec_ctx(2) - next(ctx) - + make_async_exec_ctx(max_jobs=2) runner, _ = async_runner with pytest.raises(KeyboardInterrupt): runner.runall(make_cases([ @@ -767,9 +764,7 @@ def test_kbd_interrupt_in_setup_with_limited_concurrency( def test_run_complete_fails_main_loop(async_runner, make_cases, make_async_exec_ctx): - ctx = make_async_exec_ctx(1) - next(ctx) - + make_async_exec_ctx(max_jobs=1) runner, _ = async_runner num_checks = 3 runner.runall(make_cases([SleepCheckPollFail(10), @@ -787,9 +782,7 @@ def test_run_complete_fails_main_loop(async_runner, make_cases, def test_run_complete_fails_busy_loop(async_runner, make_cases, make_async_exec_ctx): - ctx = make_async_exec_ctx(1) - next(ctx) - + make_async_exec_ctx(max_jobs=1) runner, _ = async_runner num_checks = 3 runner.runall(make_cases([SleepCheckPollFailLate(1), @@ -807,9 +800,7 @@ def test_run_complete_fails_busy_loop(async_runner, make_cases, def test_compile_fail_reschedule_main_loop(async_runner, make_cases, make_async_exec_ctx): - ctx = make_async_exec_ctx(1) - next(ctx) - + make_async_exec_ctx(max_jobs=1) runner, _ = async_runner num_checks = 2 runner.runall(make_cases([SleepCheckPollFail(.1), CompileFailureCheck()])) @@ -822,9 +813,7 @@ def 
test_compile_fail_reschedule_main_loop(async_runner, make_cases, def test_compile_fail_reschedule_busy_loop(async_runner, make_cases, make_async_exec_ctx): - ctx = make_async_exec_ctx(1) - next(ctx) - + make_async_exec_ctx(max_jobs=1) runner, _ = async_runner num_checks = 2 runner.runall( diff --git a/unittests/test_schedulers.py b/unittests/test_schedulers.py index e01003a15c..3084d3bda1 100644 --- a/unittests/test_schedulers.py +++ b/unittests/test_schedulers.py @@ -45,10 +45,11 @@ def local_only(scheduler): @pytest.fixture def temp_runtime(tmp_path): - def _temp_runtime(site_config, system=None, options={}): + def _temp_runtime(site_config, system=None, options=None): + options = options or {} options.update({'systems/prefix': tmp_path}) with rt.temp_runtime(site_config, system, options): - yield rt.runtime + yield yield _temp_runtime diff --git a/unittests/test_utility.py b/unittests/test_utility.py index 2350ad1762..f322707d15 100644 --- a/unittests/test_utility.py +++ b/unittests/test_utility.py @@ -1442,10 +1442,11 @@ def test_cray_cle_info_missing_parts(tmp_path): @pytest.fixture def temp_runtime(tmp_path): - def _temp_runtime(site_config, system=None, options={}): + def _temp_runtime(site_config, system=None, options=None): + options = options or {} options.update({'systems/prefix': tmp_path}) - with rt.temp_runtime(site_config, system, options) as ctx: - yield ctx + with rt.temp_runtime(site_config, system, options): + yield yield _temp_runtime