diff --git a/reframe/core/logging.py b/reframe/core/logging.py
index ed05763ccf..a749945076 100644
--- a/reframe/core/logging.py
+++ b/reframe/core/logging.py
@@ -726,6 +726,9 @@ def _update_check_extras(self):
         )
 
     def log_performance(self, level, task, msg=None, multiline=False):
+        if self.extra['__rfm_check__'] is None:
+            return
+
         self.extra['check_partition'] = task.testcase.partition.name
         self.extra['check_environ'] = task.testcase.environ.name
         self.extra['check_result'] = 'pass' if task.succeeded else 'fail'
diff --git a/reframe/frontend/executors/__init__.py b/reframe/frontend/executors/__init__.py
index 957ea58689..19631c2a58 100644
--- a/reframe/frontend/executors/__init__.py
+++ b/reframe/frontend/executors/__init__.py
@@ -386,9 +386,8 @@ def finalize(self):
         self._current_stage = 'finalize'
         self._notify_listeners('on_task_success')
 
-        if self.check.is_performance_check():
-            self._perflogger.log_performance(logging.INFO, self,
-                                             multiline=self._perflog_compat)
+        self._perflogger.log_performance(logging.INFO, self,
+                                         multiline=self._perflog_compat)
 
     @logging.time_function
     def cleanup(self, *args, **kwargs):
@@ -398,9 +397,8 @@ def fail(self, exc_info=None):
         self._failed_stage = self._current_stage
         self._exc_info = exc_info or sys.exc_info()
         self._notify_listeners('on_task_failure')
-        if self.check.is_performance_check():
-            self._perflogger.log_performance(logging.INFO, self,
-                                             multiline=self._perflog_compat)
+        self._perflogger.log_performance(logging.INFO, self,
+                                         multiline=self._perflog_compat)
 
     def skip(self, exc_info=None):
         self._skipped = True
diff --git a/unittests/test_policies.py b/unittests/test_policies.py
index 84fbe62b04..bab919ce94 100644
--- a/unittests/test_policies.py
+++ b/unittests/test_policies.py
@@ -989,6 +989,24 @@ def perf1(self):
     return _MyTest()
 
 
+@pytest.fixture
+def failing_perf_test():
+    class _MyFailingTest(rfm.RunOnlyRegressionTest):
+        valid_systems = ['*']
+        valid_prog_environs = ['*']
+        executable = 'echo perf0=100'
+
+        @sanity_function
+        def validate(self):
+            return False
+
+        @performance_function('unit0')
+        def perf0(self):
+            return sn.extractsingle(r'perf0=(\S+)', self.stdout, 1, float)
+
+    return _MyFailingTest()
+
+
 @pytest.fixture
 def simple_test():
     class _MySimpleTest(rfm.RunOnlyRegressionTest):
@@ -1192,7 +1210,8 @@ def test_perf_logging_no_perfvars(make_runner, make_exec_ctx, perf_test,
 
 
 def test_perf_logging_multiline(make_runner, make_exec_ctx, perf_test,
-                                simple_test, config_perflog, tmp_path):
+                                simple_test, failing_perf_test,
+                                config_perflog, tmp_path):
     make_exec_ctx(
         config_perflog(
             fmt=(
@@ -1207,7 +1226,9 @@ def test_perf_logging_multiline(make_runner, make_exec_ctx, perf_test,
     )
     logging.configure_logging(rt.runtime().site_config)
    runner = make_runner()
-    testcases = executors.generate_testcases([perf_test, simple_test])
+    testcases = executors.generate_testcases(
+        [perf_test, simple_test, failing_perf_test]
+    )
     runner.runall(testcases)
 
     logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyTest.log'