diff --git a/config/cscs-ci.py b/config/cscs-ci.py index 5aef37f666..ef48ea7539 100644 --- a/config/cscs-ci.py +++ b/config/cscs-ci.py @@ -24,9 +24,6 @@ { 'name': 'gpu', 'scheduler': 'slurm', - 'modules': [ - 'daint-gpu' - ], 'access': [ '--constraint=gpu', '--partition=cscsci', @@ -61,9 +58,6 @@ { 'name': 'slurm', 'scheduler': 'slurm', - 'modules': [ - 'daint-gpu' - ], 'access': [ '--constraint=gpu', f'--account={osext.osgroup()}' @@ -86,9 +80,6 @@ { 'name': 'pbs', 'scheduler': 'pbs', - 'modules': [ - 'daint-gpu' - ], 'access': [ 'proc=gpu', f'-A {osext.osgroup()}' @@ -103,9 +94,6 @@ { 'name': 'torque', 'scheduler': 'torque', - 'modules': [ - 'daint-gpu' - ], 'access': [ '-l proc=gpu', f'-A {osext.osgroup()}' diff --git a/docs/manpage.rst b/docs/manpage.rst index b2bef7e9c0..fabbb58971 100644 --- a/docs/manpage.rst +++ b/docs/manpage.rst @@ -313,6 +313,13 @@ Options controlling ReFrame execution The test stage and output directories will receive a ``_retry`` suffix every time the test is retried. +.. option:: --maxfail=NUM + + The maximum number of failing test cases before the execution is aborted. + After ``NUM`` test cases have failed, the remaining test cases will be aborted. + The counter of failed test cases is reset to 0 in every retry. + + .. option:: --disable-hook=HOOK Disable the pipeline hook named ``HOOK`` from all the tests that will run. 
diff --git a/reframe/core/exceptions.py b/reframe/core/exceptions.py index f3d42a36a5..b2267c2f15 100644 --- a/reframe/core/exceptions.py +++ b/reframe/core/exceptions.py @@ -96,6 +96,10 @@ class TaskDependencyError(ReframeError): ''' +class FailureLimitError(ReframeError): + '''Raised when the limit of test failures has been reached.''' + + class AbortTaskError(ReframeError): '''Raised by the runtime inside a regression task to denote that it has been aborted due to an external reason (e.g., keyboard interrupt, fatal @@ -131,7 +135,7 @@ class PipelineError(ReframeError): ''' -class ReframeForceExitError(ReframeError): +class ForceExitError(ReframeError): '''Raised when ReFrame execution must be forcefully ended, e.g., after a SIGTERM was received. ''' @@ -280,8 +284,6 @@ def user_frame(exc_type, exc_value, tb): :returns: A frame object or :class:`None` if no user frame was found. - :meta private: - ''' if not inspect.istraceback(tb): return None @@ -294,8 +296,17 @@ def user_frame(exc_type, exc_value, tb): return None +def is_exit_request(exc_type, exc_value, tb): + '''Check if the error is a request to exit.''' + + return isinstance(exc_value, (KeyboardInterrupt, + ForceExitError, + FailureLimitError)) + + def is_severe(exc_type, exc_value, tb): '''Check if exception is a severe one.''' + soft_errors = (ReframeError, ConnectionError, FileExistsError, diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index 35535fcfad..62cdb2d30e 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -316,6 +316,10 @@ def main(): help='Set the maximum number of times a failed regression test ' 'may be retried (default: 0)' ) + run_options.add_argument( + '--maxfail', metavar='NUM', action='store', default=sys.maxsize, + help='Exit after first NUM failures' + ) run_options.add_argument( '--restore-session', action='store', nargs='?', const='', metavar='REPORT', @@ -738,11 +742,12 @@ def print_infoline(param, value): def _case_failed(t): rec = 
report.case(*t) - if rec and rec['result'] == 'failure': - return True - else: + if not rec: return False + return (rec['result'] == 'failure' or + rec['result'] == 'aborted') + testcases = list(filter(_case_failed, testcases)) printer.verbose( f'Filtering successful test case(s): ' @@ -918,7 +923,19 @@ def module_unuse(*paths): f'--max-retries is not a valid integer: {max_retries}' ) from None - runner = Runner(exec_policy, printer, max_retries) + try: + max_failures = int(options.maxfail) + if max_failures < 0: + raise errors.ConfigError( + f'--maxfail should be a non-negative integer: ' + f'{options.maxfail!r}' + ) + except ValueError: + raise errors.ConfigError( + f'--maxfail is not a valid integer: {options.maxfail!r}' + ) from None + + runner = Runner(exec_policy, printer, max_retries, max_failures) try: time_start = time.time() session_info['time_start'] = time.strftime( @@ -933,12 +950,12 @@ def module_unuse(*paths): session_info['time_elapsed'] = time_end - time_start # Print a retry report if we did any retries - if runner.stats.failures(run=0): + if runner.stats.failed(run=0): printer.info(runner.stats.retry_report()) # Print a failure report if we had failures in the last run success = True - if runner.stats.failures(): + if runner.stats.failed(): success = False runner.stats.print_failure_report(printer) if options.failure_stats: @@ -984,16 +1001,14 @@ def module_unuse(*paths): sys.exit(1) sys.exit(0) - except KeyboardInterrupt: - sys.exit(1) - except errors.ReframeError as e: - printer.error(str(e)) - sys.exit(1) - except (Exception, errors.ReframeFatalError): + except (Exception, KeyboardInterrupt, errors.ReframeFatalError): exc_info = sys.exc_info() tb = ''.join(traceback.format_exception(*exc_info)) - printer.error(errors.what(*exc_info)) - if errors.is_severe(*exc_info): + printer.error(f'run session stopped: {errors.what(*exc_info)}') + if errors.is_exit_request(*exc_info): + # Print stack traces for exit requests only when TOO verbose + 
printer.debug2(tb) + elif errors.is_severe(*exc_info): printer.error(tb) else: printer.verbose(tb) diff --git a/reframe/frontend/executors/__init__.py b/reframe/frontend/executors/__init__.py index 1dfec64362..301e3d7baf 100644 --- a/reframe/frontend/executors/__init__.py +++ b/reframe/frontend/executors/__init__.py @@ -15,14 +15,17 @@ import reframe.core.runtime as runtime import reframe.frontend.dependencies as dependencies import reframe.utility.jsonext as jsonext -from reframe.core.exceptions import (AbortTaskError, JobNotStartedError, - ReframeForceExitError, TaskExit) +from reframe.core.exceptions import (AbortTaskError, + JobNotStartedError, + FailureLimitError, + ForceExitError, + TaskExit) from reframe.core.schedulers.local import LocalJobScheduler from reframe.frontend.printer import PrettyPrinter from reframe.frontend.statistics import TestStats - -ABORT_REASONS = (KeyboardInterrupt, ReframeForceExitError, AssertionError) +ABORT_REASONS = (AssertionError, FailureLimitError, + KeyboardInterrupt, ForceExitError) class TestCase: @@ -136,6 +139,8 @@ def __init__(self, case, listeners=[]): # Timestamps for the start and finish phases of the pipeline self._timestamps = {} + self._aborted = False + def duration(self, phase): # Treat pseudo-phases first if phase == 'compile_complete': @@ -204,7 +209,7 @@ def exc_info(self): @property def failed(self): - return self._failed_stage is not None + return self._failed_stage is not None and not self._aborted @property def failed_stage(self): @@ -218,6 +223,10 @@ def succeeded(self): def completed(self): return self.failed or self.succeeded + @property + def aborted(self): + return self._aborted + def _notify_listeners(self, callback_name): for l in self._listeners: callback = getattr(l, callback_name) @@ -308,9 +317,13 @@ def fail(self, exc_info=None): self._notify_listeners('on_task_failure') def abort(self, cause=None): + if self.failed or self._aborted: + return + logging.getlogger().debug2('Aborting test case: 
{self.testcase!r}') exc = AbortTaskError() exc.__cause__ = cause + self._aborted = True try: # FIXME: we should perhaps extend the RegressionTest interface # for supporting job cancelling @@ -347,22 +360,28 @@ def on_task_success(self, task): def _handle_sigterm(signum, frame): - raise ReframeForceExitError('received TERM signal') + raise ForceExitError('received TERM signal') class Runner: '''Responsible for executing a set of regression tests based on an execution policy.''' - def __init__(self, policy, printer=None, max_retries=0): + def __init__(self, policy, printer=None, max_retries=0, + max_failures=sys.maxsize): self._policy = policy self._printer = printer or PrettyPrinter() self._max_retries = max_retries self._stats = TestStats() self._policy.stats = self._stats self._policy.printer = self._printer + self._policy.max_failures = max_failures signal.signal(signal.SIGTERM, _handle_sigterm) + @property + def max_failures(self): + return self._policy.max_failures + @property def max_retries(self): return self._max_retries @@ -376,6 +395,7 @@ def stats(self): return self._stats def runall(self, testcases, restored_cases=None): + abort_reason = None num_checks = len({tc.check.name for tc in testcases}) self._printer.separator('short double line', 'Running %d check(s)' % num_checks) @@ -386,20 +406,27 @@ def runall(self, testcases, restored_cases=None): if self._max_retries: restored_cases = restored_cases or [] self._retry_failed(testcases + restored_cases) - finally: # Print the summary line - num_failures = len(self._stats.failures()) + num_failures = len(self._stats.failed()) + num_completed = len(self._stats.completed()) + if num_failures: + status = 'FAILED' + else: + status = 'PASSED' + self._printer.status( - 'FAILED' if num_failures else 'PASSED', - 'Ran %d test case(s) from %d check(s) (%d failure(s))' % - (len(testcases), num_checks, num_failures), just='center' + status, + f'Ran {num_completed}/{len(testcases)}' + f' test case(s) from {num_checks} check(s) 
' + f'({num_failures} failure(s))', + just='center' ) self._printer.timestamp('Finished on', 'short double line') def _retry_failed(self, cases): rt = runtime.runtime() - failures = self._stats.failures() + failures = self._stats.failed() while (failures and rt.current_run < self._max_retries): num_failed_checks = len({tc.check.name for tc in failures}) rt.next_run() @@ -415,7 +442,7 @@ def _retry_failed(self, cases): cases_graph, _ = dependencies.build_deps(failed_cases, cases) failed_cases = dependencies.toposort(cases_graph, is_subgraph=True) self._runall(failed_cases) - failures = self._stats.failures() + failures = self._stats.failed() def _runall(self, testcases): def print_separator(check, prefix): @@ -475,7 +502,7 @@ def __init__(self): self.stats = None def enter(self): - pass + self._num_failed_tasks = 0 def exit(self): pass diff --git a/reframe/frontend/executors/policies.py b/reframe/frontend/executors/policies.py index 6a33544a3e..a23e24aeb8 100644 --- a/reframe/frontend/executors/policies.py +++ b/reframe/frontend/executors/policies.py @@ -9,7 +9,9 @@ import sys import time -from reframe.core.exceptions import (TaskDependencyError, TaskExit) +from reframe.core.exceptions import (FailureLimitError, + TaskDependencyError, + TaskExit) from reframe.core.logging import getlogger from reframe.frontend.executors import (ExecutionPolicy, RegressionTask, TaskEventListener, ABORT_REASONS) @@ -135,6 +137,7 @@ def runcase(self, case): return except ABORT_REASONS as e: task.abort(e) + raise except BaseException: task.fail(sys.exc_info()) @@ -149,6 +152,7 @@ def on_task_exit(self, task): pass def on_task_failure(self, task): + self._num_failed_tasks += 1 timings = task.pipeline_timings(['compile_complete', 'run_complete', 'total']) @@ -167,6 +171,10 @@ def on_task_failure(self, task): getlogger().info(f'==> test failed during {task.failed_stage!r}: ' f'test staged in {task.check.stagedir!r}') getlogger().verbose(f'==> {timings}') + if self._num_failed_tasks >= 
self.max_failures: + raise FailureLimitError( + f'maximum number of failures ({self.max_failures}) reached' + ) def on_task_success(self, task): timings = task.pipeline_timings(['compile_complete', @@ -258,6 +266,10 @@ def on_task_run(self, task): self._running_tasks[partname].append(task) def on_task_failure(self, task): + if task.aborted: + return + + self._num_failed_tasks += 1 msg = f'{task.check.info()} [{task.pipeline_timings_basic()}]' if task.failed_stage == 'cleanup': self.printer.status('ERROR', msg, just='right') @@ -272,6 +284,10 @@ def on_task_failure(self, task): getlogger().info(f'==> test failed during {task.failed_stage!r}: ' f'test staged in {stagedir!r}') getlogger().verbose(f'==> timings: {task.pipeline_timings_all()}') + if self._num_failed_tasks >= self.max_failures: + raise FailureLimitError( + f'maximum number of failures ({self.max_failures}) reached' + ) def on_task_success(self, task): msg = f'{task.check.info()} [{task.pipeline_timings_basic()}]' @@ -361,10 +377,9 @@ def runcase(self, case): return except ABORT_REASONS as e: - if not task.failed: - # Abort was caused due to failure elsewhere, abort current - # task as well - task.abort(e) + # If abort was caused due to failure elsewhere, abort current + # task as well + task.abort(e) self._failall(e) raise diff --git a/reframe/frontend/runreport.py b/reframe/frontend/runreport.py index cca26d2c18..c02c46b314 100644 --- a/reframe/frontend/runreport.py +++ b/reframe/frontend/runreport.py @@ -14,7 +14,7 @@ from reframe.utility.versioning import Version -DATA_VERSION = '1.2' +DATA_VERSION = '1.3' _SCHEMA = os.path.join(rfm.INSTALL_PREFIX, 'reframe/schemas/runreport.json') diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py index b410949ad2..77412ecdcf 100644 --- a/reframe/frontend/statistics.py +++ b/reframe/frontend/statistics.py @@ -32,9 +32,15 @@ def tasks(self, run=-1): except IndexError: raise errors.StatisticsError(f'no such run: {run}') from None - def 
failures(self, run=-1): + def failed(self, run=-1): return [t for t in self.tasks(run) if t.failed] + def aborted(self, run=-1): + return [t for t in self.tasks(run) if t.aborted] + + def completed(self, run=-1): + return [t for t in self.tasks(run) if t.completed] + def num_cases(self, run=-1): return len(self.tasks(run)) @@ -76,6 +82,7 @@ def json(self, force=False): for runid, run in enumerate(self._alltasks): testcases = [] num_failures = 0 + num_aborted = 0 for t in run: check = t.check partition = check.current_partition @@ -137,6 +144,11 @@ def json(self, force=False): if t.failed: num_failures += 1 entry['result'] = 'failure' + elif t.aborted: + entry['result'] = 'aborted' + num_aborted += 1 + + if t.failed or t.aborted: entry['fail_phase'] = t.failed_stage if t.exc_info is not None: entry['fail_reason'] = errors.what(*t.exc_info) @@ -170,6 +182,7 @@ def json(self, force=False): self._run_data.append({ 'num_cases': len(run), 'num_failures': num_failures, + 'num_aborted': num_aborted, 'runid': runid, 'testcases': testcases }) @@ -183,7 +196,7 @@ def print_failure_report(self, printer): run_report = self.json()[-1] last_run = run_report['runid'] for r in run_report['testcases']: - if r['result'] == 'success': + if r['result'] == 'success' or r['result'] == 'aborted': continue retry_info = ( diff --git a/reframe/schemas/runreport.json b/reframe/schemas/runreport.json index 6bedb58b02..6afe98223d 100644 --- a/reframe/schemas/runreport.json +++ b/reframe/schemas/runreport.json @@ -80,7 +80,7 @@ "prefix": {"type": "string"}, "result": { "type": "string", - "enum": ["success", "failure"] + "enum": ["success", "failure", "aborted"] }, "scheduler": {"type": ["string", "null"]}, "stagedir": {"type": ["string", "null"]}, @@ -112,6 +112,7 @@ "hostname": {"type": "string"}, "num_cases": {"type": "number"}, "num_failures": {"type": "number"}, + "num_aborted": {"type": "number"}, "prefix_output": {"type": "string"}, "prefix_stage": {"type": "string"}, "time_elapsed": 
{"type": "number"}, @@ -134,6 +135,7 @@ "properties": { "num_cases": {"type": "number"}, "num_failures": {"type": "number"}, + "num_aborted": {"type": "number"}, "runid": {"type": "number"}, "testcases": { "type": "array", @@ -141,7 +143,7 @@ } }, "required": [ - "num_cases", "num_failures", "runid", "testcases" + "num_cases", "num_failures", "num_aborted", "runid", "testcases" ] } } diff --git a/unittests/test_cli.py b/unittests/test_cli.py index 85a16ac3f4..5f789c443e 100644 --- a/unittests/test_cli.py +++ b/unittests/test_cli.py @@ -245,7 +245,7 @@ def test_check_submit_success(run_reframe, remote_exec_ctx): assert 'PASSED' in stdout # Assert that we have run only one test case - assert 'Ran 2 test case(s)' in stdout + assert 'Ran 2/2 test case(s)' in stdout assert 0 == returncode @@ -479,7 +479,7 @@ def test_execution_modes(run_reframe): assert 'Traceback' not in stderr assert 'FAILED' not in stdout assert 'PASSED' in stdout - assert 'Ran 2 test case' in stdout + assert 'Ran 2/2 test case' in stdout def test_no_ignore_check_conflicts(run_reframe): @@ -724,3 +724,39 @@ def test_failure_stats(run_reframe): assert 'Traceback' not in stdout assert 'Traceback' not in stderr assert returncode != 0 + + +def test_maxfail_option(run_reframe): + returncode, stdout, stderr = run_reframe( + more_options=['--maxfail', '1'], + system='testsys', + checkpath=['unittests/resources/checks/hellocheck.py'] + ) + assert 'Traceback' not in stdout + assert 'Traceback' not in stderr + assert 'Ran 2/2 test case(s) from 2 check(s) (0 failure(s))' in stdout + assert returncode == 0 + + +def test_maxfail_invalid_option(run_reframe): + returncode, stdout, stderr = run_reframe( + more_options=['--maxfail', 'foo'], + system='testsys', + checkpath=['unittests/resources/checks/hellocheck.py'] + ) + assert 'Traceback' not in stdout + assert 'Traceback' not in stderr + assert "--maxfail is not a valid integer: 'foo'" in stdout + assert returncode == 1 + + +def test_maxfail_negative(run_reframe): 
+ returncode, stdout, stderr = run_reframe( + more_options=['--maxfail', '-2'], + system='testsys', + checkpath=['unittests/resources/checks/hellocheck.py'] + ) + assert 'Traceback' not in stdout + assert 'Traceback' not in stderr + assert "--maxfail should be a non-negative integer: '-2'" in stdout + assert returncode == 1 diff --git a/unittests/test_policies.py b/unittests/test_policies.py index fb43e61cb3..8d2ba435c5 100644 --- a/unittests/test_policies.py +++ b/unittests/test_policies.py @@ -3,6 +3,7 @@ # # SPDX-License-Identifier: BSD-3-Clause +import contextlib import json import jsonschema import os @@ -19,8 +20,9 @@ import reframe.utility.jsonext as jsonext import reframe.utility.osext as osext from reframe.core.exceptions import (AbortTaskError, + FailureLimitError, ReframeError, - ReframeForceExitError, + ForceExitError, TaskDependencyError) from reframe.frontend.loader import RegressionCheckLoader @@ -118,9 +120,10 @@ def _make_cases(checks=None, sort=False, *args, **kwargs): def assert_runall(runner): - # Make sure that all cases finished or failed + # Make sure that all cases finished, failed or + # were aborted for t in runner.stats.tasks(): - assert t.succeeded or t.failed + assert t.succeeded or t.failed or t.aborted def assert_all_dead(runner): @@ -133,7 +136,7 @@ def assert_all_dead(runner): def num_failures_stage(runner, stage): stats = runner.stats - return len([t for t in stats.failures() if t.failed_stage == stage]) + return len([t for t in stats.failed() if t.failed_stage == stage]) def _validate_runreport(report): @@ -178,7 +181,7 @@ def test_runall(make_runner, make_cases, common_exec_ctx, tmp_path): assert 9 == runner.stats.num_cases() assert_runall(runner) - assert 5 == len(runner.stats.failures()) + assert 5 == len(runner.stats.failed()) assert 2 == num_failures_stage(runner, 'setup') assert 1 == num_failures_stage(runner, 'sanity') assert 1 == num_failures_stage(runner, 'performance') @@ -223,7 +226,7 @@ def 
test_runall_skip_system_check(make_runner, make_cases, common_exec_ctx): stats = runner.stats assert 10 == stats.num_cases() assert_runall(runner) - assert 5 == len(stats.failures()) + assert 5 == len(stats.failed()) assert 2 == num_failures_stage(runner, 'setup') assert 1 == num_failures_stage(runner, 'sanity') assert 1 == num_failures_stage(runner, 'performance') @@ -236,7 +239,7 @@ def test_runall_skip_prgenv_check(make_runner, make_cases, common_exec_ctx): stats = runner.stats assert 10 == stats.num_cases() assert_runall(runner) - assert 5 == len(stats.failures()) + assert 5 == len(stats.failed()) assert 2 == num_failures_stage(runner, 'setup') assert 1 == num_failures_stage(runner, 'sanity') assert 1 == num_failures_stage(runner, 'performance') @@ -250,7 +253,7 @@ def test_runall_skip_sanity_check(make_runner, make_cases, common_exec_ctx): stats = runner.stats assert 9 == stats.num_cases() assert_runall(runner) - assert 4 == len(stats.failures()) + assert 4 == len(stats.failed()) assert 2 == num_failures_stage(runner, 'setup') assert 0 == num_failures_stage(runner, 'sanity') assert 1 == num_failures_stage(runner, 'performance') @@ -265,13 +268,23 @@ def test_runall_skip_performance_check(make_runner, make_cases, stats = runner.stats assert 9 == stats.num_cases() assert_runall(runner) - assert 4 == len(stats.failures()) + assert 4 == len(stats.failed()) assert 2 == num_failures_stage(runner, 'setup') assert 1 == num_failures_stage(runner, 'sanity') assert 0 == num_failures_stage(runner, 'performance') assert 1 == num_failures_stage(runner, 'cleanup') +def test_runall_maxfail(make_runner, make_cases, common_exec_ctx): + runner = make_runner(max_failures=2) + with contextlib.suppress(FailureLimitError): + runner.runall(make_cases()) + + assert_runall(runner) + stats = runner.stats + assert 2 == len(stats.failed()) + + def test_strict_performance_check(make_runner, make_cases, common_exec_ctx): runner = make_runner() runner.policy.strict_check = True @@ -279,7 
+292,7 @@ def test_strict_performance_check(make_runner, make_cases, common_exec_ctx): stats = runner.stats assert 9 == stats.num_cases() assert_runall(runner) - assert 6 == len(stats.failures()) + assert 6 == len(stats.failed()) assert 2 == num_failures_stage(runner, 'setup') assert 1 == num_failures_stage(runner, 'sanity') assert 2 == num_failures_stage(runner, 'performance') @@ -300,7 +313,7 @@ def test_force_local_execution(make_runner, make_cases, testsys_exec_ctx): for t in stats.tasks(): assert t.check.local - assert not stats.failures() + assert not stats.failed() def test_kbd_interrupt_within_test(make_runner, make_cases, common_exec_ctx): @@ -310,7 +323,7 @@ def test_kbd_interrupt_within_test(make_runner, make_cases, common_exec_ctx): runner.runall(make_cases([KeyboardInterruptCheck()])) stats = runner.stats - assert 1 == len(stats.failures()) + assert 1 == len(stats.failed()) assert_all_dead(runner) @@ -319,7 +332,7 @@ def test_system_exit_within_test(make_runner, make_cases, common_exec_ctx): runner = make_runner() runner.runall(make_cases([SystemExitCheck()])) stats = runner.stats - assert 1 == len(stats.failures()) + assert 1 == len(stats.failed()) def test_retries_bad_check(make_runner, make_cases, common_exec_ctx): @@ -330,7 +343,7 @@ def test_retries_bad_check(make_runner, make_cases, common_exec_ctx): assert 2 == runner.stats.num_cases() assert_runall(runner) assert runner.max_retries == rt.runtime().current_run - assert 2 == len(runner.stats.failures()) + assert 2 == len(runner.stats.failed()) # Ensure that the report does not raise any exception runner.stats.retry_report() @@ -344,7 +357,7 @@ def test_retries_good_check(make_runner, make_cases, common_exec_ctx): assert 1 == runner.stats.num_cases() assert_runall(runner) assert 0 == rt.runtime().current_run - assert 0 == len(runner.stats.failures()) + assert 0 == len(runner.stats.failed()) def test_pass_in_retries(make_runner, make_cases, tmp_path, common_exec_ctx): @@ -357,20 +370,20 @@ def 
test_pass_in_retries(make_runner, make_cases, tmp_path, common_exec_ctx): # Ensure that the test passed after retries in run `pass_run_no` assert 1 == runner.stats.num_cases() assert_runall(runner) - assert 1 == len(runner.stats.failures(run=0)) + assert 1 == len(runner.stats.failed(run=0)) assert pass_run_no == rt.runtime().current_run - assert 0 == len(runner.stats.failures()) + assert 0 == len(runner.stats.failed()) def test_sigterm_handling(make_runner, make_cases, common_exec_ctx): runner = make_runner() - with pytest.raises(ReframeForceExitError, + with pytest.raises(ForceExitError, match='received TERM signal'): runner.runall(make_cases([SelfKillCheck()])) assert_all_dead(runner) assert runner.stats.num_cases() == 1 - assert len(runner.stats.failures()) == 1 + assert len(runner.stats.failed()) == 1 @pytest.fixture @@ -389,8 +402,8 @@ def assert_dependency_run(runner): assert_runall(runner) stats = runner.stats assert 10 == stats.num_cases(0) - assert 4 == len(stats.failures()) - for tf in stats.failures(): + assert 4 == len(stats.failed()) + for tf in stats.failed(): check = tf.testcase.check _, exc_value, _ = tf.exc_info if check.name == 'T7' or check.name == 'T9': @@ -508,7 +521,7 @@ def test_concurrency_unlimited(async_runner, make_cases, make_async_exec_ctx): # Ensure that all tests were run and without failures. assert num_checks == runner.stats.num_cases() assert_runall(runner) - assert 0 == len(runner.stats.failures()) + assert 0 == len(runner.stats.failed()) # Ensure that maximum concurrency was reached as fast as possible assert num_checks == max(monitor.num_tasks) @@ -536,7 +549,7 @@ def test_concurrency_limited(async_runner, make_cases, make_async_exec_ctx): # Ensure that all tests were run and without failures. 
assert num_checks == runner.stats.num_cases() assert_runall(runner) - assert 0 == len(runner.stats.failures()) + assert 0 == len(runner.stats.failed()) # Ensure that maximum concurrency was reached as fast as possible assert max_jobs == max(monitor.num_tasks) @@ -578,7 +591,7 @@ def test_concurrency_none(async_runner, make_cases, make_async_exec_ctx): # Ensure that all tests were run and without failures. assert num_checks == runner.stats.num_cases() assert_runall(runner) - assert 0 == len(runner.stats.failures()) + assert 0 == len(runner.stats.failed()) # Ensure that a single task was running all the time assert 1 == max(monitor.num_tasks) @@ -596,7 +609,8 @@ def test_concurrency_none(async_runner, make_cases, make_async_exec_ctx): def assert_interrupted_run(runner): assert 4 == runner.stats.num_cases() assert_runall(runner) - assert 4 == len(runner.stats.failures()) + assert 1 == len(runner.stats.failed()) + assert 3 == len(runner.stats.aborted()) assert_all_dead(runner) # Verify that failure reasons for the different tasks are correct @@ -684,7 +698,7 @@ def test_run_complete_fails_main_loop(async_runner, make_cases, assert_runall(runner) stats = runner.stats assert stats.num_cases() == num_checks - assert len(stats.failures()) == 2 + assert len(stats.failed()) == 2 # Verify that the succeeded test is the SleepCheck for t in stats.tasks(): @@ -704,7 +718,7 @@ def test_run_complete_fails_busy_loop(async_runner, make_cases, assert_runall(runner) stats = runner.stats assert stats.num_cases() == num_checks - assert len(stats.failures()) == 2 + assert len(stats.failed()) == 2 # Verify that the succeeded test is the SleepCheck for t in stats.tasks(): @@ -724,7 +738,7 @@ def test_compile_fail_reschedule_main_loop(async_runner, make_cases, stats = runner.stats assert num_checks == stats.num_cases() assert_runall(runner) - assert num_checks == len(stats.failures()) + assert num_checks == len(stats.failed()) def test_compile_fail_reschedule_busy_loop(async_runner, 
make_cases, @@ -740,7 +754,7 @@ def test_compile_fail_reschedule_busy_loop(async_runner, make_cases, stats = runner.stats assert num_checks == stats.num_cases() assert_runall(runner) - assert num_checks == len(stats.failures()) + assert num_checks == len(stats.failed()) @pytest.fixture diff --git a/unittests/test_utility.py b/unittests/test_utility.py index ff8006ec76..621f103d7d 100644 --- a/unittests/test_utility.py +++ b/unittests/test_utility.py @@ -1409,6 +1409,7 @@ def user_exec_ctx(request, temp_runtime): @pytest.fixture def modules_system(user_exec_ctx, monkeypatch): # Pretend to be on a clean modules environment + monkeypatch.setenv('MODULEPATH', '') monkeypatch.setenv('LOADEDMODULES', '') monkeypatch.setenv('_LMFILES_', '')