diff --git a/docs/tutorial.rst b/docs/tutorial.rst
index 041c0e4d40..9594f1e618 100644
--- a/docs/tutorial.rst
+++ b/docs/tutorial.rst
@@ -399,7 +399,7 @@ The ``setup()`` method is now very simple: it gets the correct compilation
 flags from the ``prgenv_flags`` dictionary and applies them to the build system.
 
 .. literalinclude:: ../tutorial/example2.py
-   :lines: 1-4,36-63
+   :lines: 1-4,36-62
 
 .. tip::
    A regression test is like any other Python class, so you can freely define your own attributes.
@@ -600,11 +600,18 @@ Thresholds are specified as decimal fractions of the reference value. For nonneg
 In our example, the reference value for this test on ``daint:gpu`` is 50 Gflop/s ±10%.
 Setting a threshold value to :class:`None` disables the threshold.
 If you specify a measurement unit as well, you will be able to log it in the performance logs of the test; this is handy when you are inspecting or plotting the performance values.
+ReFrame will always add a default ``*`` entry to the ``reference`` dictionary, if one does not exist, with the reference value ``(0, None, None, unit)``, where ``unit`` is derived from the unit of each respective performance variable.
+This is useful when you use ReFrame for benchmarking purposes and would like to run a test on an unknown system.
 
 .. note::
    Reference tuples may now optionally contain units.
 
    .. versionadded:: 2.16
 
+.. note::
+   A default ``*`` entry is now always added to the reference dictionary.
+
+   .. versionadded:: 2.19
+
 Combining It All Together
 -------------------------
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index b5f497f297..a8ae56579c 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -1127,6 +1127,39 @@ def check_performance(self):
             return
 
         with os_ext.change_dir(self._stagedir):
+            # Check if default reference perf values are provided and
+            # store all the variables tested in the performance check
+            has_default = False
+            variables = set()
+            for key, ref in self.reference.items():
+                keyparts = key.split(self.reference.scope_separator)
+                system = keyparts[0]
+                varname = keyparts[-1]
+                try:
+                    unit = ref[3]
+                except IndexError:
+                    unit = None
+
+                variables.add((varname, unit))
+                if system == '*':
+                    has_default = True
+                    break
+
+            if not has_default:
+                if not variables:
+                    # If empty, it means that self.reference was empty, so
+                    # try to infer the variable names from perf_patterns
+                    variables = {(name, None)
+                                 for name in self.perf_patterns.keys()}
+
+                for var in variables:
+                    name, unit = var
+                    ref_tuple = (0, None, None)
+                    if unit:
+                        ref_tuple += (unit,)
+
+                    self.reference.update({'*': {name: ref_tuple}})
+
             # We first evaluate and log all performance values and then we
             # check them against the reference. This way we always log them
             # even if they don't meet the reference.
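For reviewers who want to poke at the fallback logic outside of ReFrame, here is a minimal, self-contained sketch of what the new block in `check_performance()` does. It is an approximation, not the real implementation: it assumes `':'` as the scope separator and uses a plain dict with flattened `'system:partition:variable'` keys in place of ReFrame's `ScopedDict`; the helper name `add_default_reference` is made up for illustration.

```python
# Hypothetical standalone model of the default-reference logic above.
# Assumptions (not ReFrame's real API): ':' is the scope separator and a
# plain dict with flattened 'system:partition:variable' keys stands in
# for ReFrame's ScopedDict.
SCOPE_SEP = ':'


def add_default_reference(reference, perf_patterns):
    """Add a '*' fallback entry for every performance variable."""
    has_default = False
    variables = set()
    for key, ref in reference.items():
        keyparts = key.split(SCOPE_SEP)
        system, varname = keyparts[0], keyparts[-1]
        # The optional fourth tuple element is the measurement unit
        unit = ref[3] if len(ref) > 3 else None
        variables.add((varname, unit))
        if system == '*':
            has_default = True
            break

    if not has_default:
        if not variables:
            # reference was empty; fall back to the perf_patterns keys
            variables = {(name, None) for name in perf_patterns}

        for name, unit in variables:
            ref_tuple = (0, None, None)
            if unit:
                ref_tuple += (unit,)

            reference['*' + SCOPE_SEP + name] = ref_tuple

    return reference


if __name__ == '__main__':
    ref = {'daint:gpu:perf': (50.0, -0.1, 0.1, 'Gflop/s')}
    add_default_reference(ref, {'perf': '<deferred expr>'})
    print(ref)  # now also contains '*:perf': (0, None, None, 'Gflop/s')
```

Note the early `break`: as soon as a `'*'` scope is seen, no default needs to be synthesized, so collecting the remaining variables would be wasted work.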
diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py
index 03fa947e68..d65bd9a5dc 100644
--- a/unittests/test_pipeline.py
+++ b/unittests/test_pipeline.py
@@ -578,11 +578,20 @@ def test_unknown_system(self):
         self.test.reference = {
             'testsys:login': {
                 'value1': (1.4, -0.1, 0.1),
-                'value2': (1.7, -0.1, 0.1),
                 'value3': (3.1, -0.1, 0.1),
+            },
+            'testsys:login2': {
+                'value2': (1.7, -0.1, 0.1)
             }
         }
-        self.assertRaises(SanityError, self.test.check_performance)
+        self.test.check_performance()
+
+    def test_empty_reference(self):
+        self.write_performance_output(performance1=1.3,
+                                      performance2=1.8,
+                                      performance3=3.3)
+        self.test.reference = {}
+        self.test.check_performance()
 
     def test_default_reference(self):
         self.write_performance_output(performance1=1.3,
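To see the user-facing effect the relaxed tests above now assert, consider a hypothetical tutorial-style test. Everything here (class name, source file, regex patterns, executable options) is invented for illustration; only the attributes themselves (`perf_patterns`, `reference`, `sn.extractsingle`) follow the tutorial API touched by the docs diff above.

```python
import reframe as rfm
import reframe.utility.sanity as sn


@rfm.simple_test
class Example8Test(rfm.RegressionTest):
    def __init__(self):
        super().__init__()
        self.descr = 'Matrix-vector multiplication benchmark (illustrative)'
        self.valid_systems = ['*']
        self.valid_prog_environs = ['*']
        self.sourcepath = 'example_matrix_vector.c'   # hypothetical source
        self.sanity_patterns = sn.assert_found(r'Performance:', self.stdout)
        self.perf_patterns = {
            'perf': sn.extractsingle(r'Performance:\s+(\S+) Gflop/s',
                                     self.stdout, 1, float)
        }
        # Only the known system gets a real reference; on any other
        # system ReFrame now falls back to the auto-generated
        # '*': {'perf': (0, None, None, 'Gflop/s')} entry instead of
        # failing the performance check.
        self.reference = {
            'daint:gpu': {
                'perf': (50.0, -0.1, 0.1, 'Gflop/s')
            }
        }
```

Run on ``daint:gpu``, the 50 Gflop/s ±10% bounds apply as before; run anywhere else, the test still logs its performance value (with the derived unit) instead of aborting with a ``SanityError``, which is exactly what the updated ``test_unknown_system`` and the new ``test_empty_reference`` assert.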