reframe/core/pipeline.py (34 changes: 28 additions & 6 deletions)
@@ -673,6 +673,18 @@ def pipeline_hooks(cls):
# FIXME: There is currently no way to express tuples of `float`s or
# `None`s, so we just use the very generic `object`

#: Require that a reference is defined for each system that this test is
#: run on.
#:
#: If this is set and a reference is not found for the current system, the
#: test will fail.
#:
#: :type: boolean
#: :default: :const:`False`
#:
#: .. versionadded:: 4.0.0
require_reference = variable(typ.Bool, value=False)

#:
#: Refer to the :doc:`ReFrame Tutorials </tutorials>` for concrete usage
#: examples.
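
For illustration, a minimal sketch of how a user test might opt into the new knob. The test class, system names, and output patterns below are hypothetical; with `require_reference = True`, a missing reference for the current system now fails the performance stage instead of being silently accepted:

```python
import reframe as rfm
import reframe.utility.sanity as sn


@rfm.simple_test
class StreamBenchmark(rfm.RunOnlyRegressionTest):
    '''Hypothetical example test exercising ``require_reference``.'''

    valid_systems = ['*']
    valid_prog_environs = ['*']
    executable = './stream.x'

    # Fail the performance stage if no reference exists for the
    # system/partition the test actually runs on.
    require_reference = True
    reference = {
        'daint:gpu': {                       # hypothetical system:partition
            'triad_bw': (1800.0, -0.05, None, 'MB/s'),
        }
    }

    @sanity_function
    def validate(self):
        return sn.assert_found(r'Solution Validates', self.stdout)

    @performance_function('MB/s')
    def triad_bw(self):
        return sn.extractsingle(r'Triad:\s+(\S+)', self.stdout, 1, float)
```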
@@ -898,7 +910,8 @@ def pipeline_hooks(cls):
#: responsibility to check whether the build phase failed by adding an
#: appropriate sanity check.
#:
#: :type: boolean : :default: :class:`True`
#: :type: boolean
#: :default: :class:`True`
build_locally = variable(typ.Bool, value=True, loggable=True)

def __new__(cls, *args, **kwargs):
@@ -2110,6 +2123,13 @@ def check_performance(self):
# Pop the unit from the ref tuple (redundant)
ref = ref[:3]
except KeyError:
if self.require_reference:
raise PerformanceError(
f'no reference value found for '
f'performance variable {tag!r} on '
f'system {self._current_partition.fullname!r}'
) from None

ref = (0, None, None)

self._perfvalues[key] = (value, *ref, unit)
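
For reference, ReFrame's reference tuples have the form `(target, lower_thres, upper_thres[, unit])`, where the thresholds are fractional deviations from the target and `None` disables a bound; the `(0, None, None)` fallback above therefore only logs the measured value without constraining it. A minimal sketch of that comparison, using a hypothetical helper name:

```python
def within_bounds(value, target, lower_thres, upper_thres):
    '''Hypothetical helper mirroring how a ReFrame-style reference tuple
    (target, lower_thres, upper_thres) bounds a measured value.'''
    if lower_thres is not None and value < target * (1 + lower_thres):
        return False

    if upper_thres is not None and value > target * (1 + upper_thres):
        return False

    return True


# A bounded reference: accept values within -10%/+10% of 1.4
assert within_bounds(1.3, 1.4, -0.1, 0.1)

# The (0, None, None) fallback disables both bounds: anything passes
assert within_bounds(99.0, 0, None, None)
```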
@@ -2143,7 +2163,8 @@ def check_performance(self):
for var in variables:
name, unit = var
ref_tuple = (0, None, None, unit)
self.reference.update({'*': {name: ref_tuple}})
if not self.require_reference:
self.reference.update({'*': {name: ref_tuple}})

# We first evaluate and log all performance values and then we
# check them against the reference. This way we always log them
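
The `'*'` entry injected above is ReFrame's catch-all reference scope: it matches any system for which no more specific entry exists, which is why it is no longer installed automatically when `require_reference` is set. A sketch of how a test could combine a system-specific entry with such a wildcard fallback (system and variable names hypothetical):

```python
reference = {
    'daint:gpu': {
        'triad_bw': (1800.0, -0.05, 0.05, 'MB/s'),  # bounded on this partition
    },
    '*': {
        'triad_bw': (0, None, None, 'MB/s'),        # elsewhere: log only
    }
}
```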
@@ -2152,9 +2173,10 @@ def check_performance(self):
value = sn.evaluate(expr)
key = f'{self._current_partition.fullname}:{tag}'
if key not in self.reference:
raise SanityError(
f'tag {tag!r} not resolved in references for '
f'{self._current_partition.fullname}'
raise PerformanceError(
f'no reference value found for '
f'performance variable {tag!r} on '
f'system {self._current_partition.fullname!r}'
)

self._perfvalues[key] = (value, *self.reference[key])
@@ -2181,7 +2203,7 @@ def check_performance(self):
'expected {1} (l={2}, u={3})' % tag))
)
except SanityError as e:
raise PerformanceError(e)
raise PerformanceError(e) from None

def _copy_job_files(self, job, dst):
if job is None:
unittests/test_pipeline.py (66 changes: 65 additions & 1 deletion)
@@ -1315,6 +1315,49 @@ def __init__(self):
yield MyTest()


@pytest.fixture
def dummytest_modern(testsys_exec_ctx, perf_file, sanity_file):
'''Modern version of the dummytest above'''

class MyTest(rfm.RunOnlyRegressionTest):
perf_file = perf_file
reference = {
'testsys': {
'value1': (1.4, -0.1, 0.1, None),
'value2': (1.7, -0.1, 0.1, None),
},
'testsys:gpu': {
'value3': (3.1, -0.1, 0.1, None),
}
}

@sanity_function
def validate(self):
return sn.assert_found(r'result = success', sanity_file)

@performance_function('unit')
def value1(self):
return sn.extractsingle(r'perf1 = (\S+)', perf_file, 1, float)

@performance_function('unit')
def value2(self):
return sn.extractsingle(r'perf2 = (\S+)', perf_file, 1, float)

@performance_function('unit')
def value3(self):
return sn.extractsingle(r'perf3 = (\S+)', perf_file, 1, float)

yield MyTest()


@pytest.fixture(params=['classic', 'modern'])
def dummy_perftest(request, dummytest, dummytest_modern):
if request.param == 'modern':
return dummytest_modern
else:
return dummytest


def test_sanity_success(dummytest, sanity_file, perf_file, dummy_gpu_exec_ctx):
sanity_file.write_text('result = success\n')
perf_file.write_text('perf1 = 1.3\n'
@@ -1390,7 +1433,7 @@ def test_reference_unknown_tag(dummytest, sanity_file,
'foo': (3.1, -0.1, 0.1, None),
}
}
with pytest.raises(SanityError):
with pytest.raises(PerformanceError):
_run_sanity(dummytest, *dummy_gpu_exec_ctx)


@@ -1456,6 +1499,27 @@ def test_reference_tag_resolution(dummytest, sanity_file,
_run_sanity(dummytest, *dummy_gpu_exec_ctx)


def test_required_reference(dummy_perftest, sanity_file,
perf_file, dummy_gpu_exec_ctx):
sanity_file.write_text('result = success\n')
perf_file.write_text('perf1 = 1.3\n'
'perf2 = 1.8\n'
'perf3 = 3.3\n')

dummy_perftest.require_reference = True
dummy_perftest.reference = {
'testsys:login': {
'value1': (1.4, -0.1, 0.1, None),
'value3': (3.1, -0.1, 0.1, None),
},
'foo': {
'value2': (1.7, -0.1, 0.1, None)
}
}
with pytest.raises(PerformanceError):
_run_sanity(dummy_perftest, *dummy_gpu_exec_ctx)


def test_performance_invalid_value(dummytest, sanity_file,
perf_file, dummy_gpu_exec_ctx):
sanity_file.write_text('result = success\n')