3 changes: 3 additions & 0 deletions reframe/core/logging.py
@@ -726,6 +726,9 @@ def _update_check_extras(self):
         )
 
     def log_performance(self, level, task, msg=None, multiline=False):
+        if self.extra['__rfm_check__'] is None:
+            return
+
         self.extra['check_partition'] = task.testcase.partition.name
         self.extra['check_environ'] = task.testcase.environ.name
         self.extra['check_result'] = 'pass' if task.succeeded else 'fail'
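This guard is what makes the unconditional call sites in the next file safe: log_performance() may now be reached through a logger that has no check attached (a "null" performance logger), in which case extra['__rfm_check__'] is None and there is nothing meaningful to format. A minimal, self-contained sketch of the pattern; the class and helper names here are illustrative, not ReFrame's actual internals:

    import logging


    class PerfLogAdapter(logging.LoggerAdapter):
        """Illustrative stand-in for a performance-log adapter."""

        def __init__(self, logger, check=None):
            # A "null" adapter is created with no owning check
            super().__init__(logger, {'__rfm_check__': check})

        def log_performance(self, level, task, msg=None, multiline=False):
            if self.extra['__rfm_check__'] is None:
                # No check attached: calling this is a harmless no-op
                return

            self.log(level, msg or f'performance data for {task}')


    null_logger = PerfLogAdapter(logging.getLogger('perflog'))
    null_logger.log_performance(logging.INFO, task=None)   # does nothing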
10 changes: 4 additions & 6 deletions reframe/frontend/executors/__init__.py
@@ -386,9 +386,8 @@ def finalize(self):
 
         self._current_stage = 'finalize'
         self._notify_listeners('on_task_success')
-        if self.check.is_performance_check():
-            self._perflogger.log_performance(logging.INFO, self,
-                                             multiline=self._perflog_compat)
+        self._perflogger.log_performance(logging.INFO, self,
+                                         multiline=self._perflog_compat)
 
     @logging.time_function
     def cleanup(self, *args, **kwargs):
@@ -398,9 +397,8 @@ def fail(self, exc_info=None):
         self._failed_stage = self._current_stage
         self._exc_info = exc_info or sys.exc_info()
         self._notify_listeners('on_task_failure')
-        if self.check.is_performance_check():
-            self._perflogger.log_performance(logging.INFO, self,
-                                             multiline=self._perflog_compat)
+        self._perflogger.log_performance(logging.INFO, self,
+                                         multiline=self._perflog_compat)
 
     def skip(self, exc_info=None):
         self._skipped = True
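With the is_performance_check() filter removed from both paths, finalize() and fail() now invoke the performance logger unconditionally; whether anything is actually written is decided inside log_performance() by the guard added in reframe/core/logging.py. A rough, hypothetical condensation of the resulting control flow, with names simplified from the source:

    import logging


    def task_done(task, perflogger, succeeded):
        # Simplified merge of the finalize()/fail() paths after this change
        event = 'on_task_success' if succeeded else 'on_task_failure'
        task.notify_listeners(event)

        # Always called; a null perflogger returns immediately, so
        # non-performance checks no longer need a call-site check here.
        perflogger.log_performance(logging.INFO, task,
                                   multiline=task.perflog_compat)

One practical consequence, exercised by the new unit test below: a performance test that fails its sanity check still produces a perflog record, with check_result set to 'fail'.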
25 changes: 23 additions & 2 deletions unittests/test_policies.py
@@ -989,6 +989,24 @@ def perf1(self):
     return _MyTest()
 
 
+@pytest.fixture
+def failing_perf_test():
+    class _MyFailingTest(rfm.RunOnlyRegressionTest):
+        valid_systems = ['*']
+        valid_prog_environs = ['*']
+        executable = 'echo perf0=100'
+
+        @sanity_function
+        def validate(self):
+            return False
+
+        @performance_function('unit0')
+        def perf0(self):
+            return sn.extractsingle(r'perf0=(\S+)', self.stdout, 1, float)
+
+    return _MyFailingTest()
+
+
 @pytest.fixture
 def simple_test():
     class _MySimpleTest(rfm.RunOnlyRegressionTest):
@@ -1192,7 +1210,8 @@ def test_perf_logging_no_perfvars(make_runner, make_exec_ctx, perf_test,
 
 
 def test_perf_logging_multiline(make_runner, make_exec_ctx, perf_test,
-                                simple_test, config_perflog, tmp_path):
+                                simple_test, failing_perf_test,
+                                config_perflog, tmp_path):
     make_exec_ctx(
         config_perflog(
             fmt=(
@@ -1207,7 +1226,9 @@ def test_perf_logging_multiline(make_runner, make_exec_ctx, perf_test,
     )
     logging.configure_logging(rt.runtime().site_config)
     runner = make_runner()
-    testcases = executors.generate_testcases([perf_test, simple_test])
+    testcases = executors.generate_testcases(
+        [perf_test, simple_test, failing_perf_test]
+    )
     runner.runall(testcases)
 
     logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyTest.log'
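The new failing_perf_test fixture pairs a valid performance variable (echo perf0=100) with a sanity function that always returns False, so the test is guaranteed to fail while still carrying performance data. The test's assertions are elided above; a hedged sketch of the kind of check it can now make, where the log-file name for the new fixture is an assumption based on the _MyTest.log pattern:

    # Hypothetical continuation of test_perf_logging_multiline: the failing
    # performance test should also leave a perflog file behind, with its
    # result recorded as 'fail' rather than being skipped entirely.
    logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyFailingTest.log'
    assert logfile.exists()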