From 83254e974b0c9bce8129b3d0e3bdebd67ed4e10b Mon Sep 17 00:00:00 2001
From: Javier Otero <71280927+jjotero@users.noreply.github.com>
Date: Mon, 30 Aug 2021 15:49:28 +0200
Subject: [PATCH 01/11] Update basic tutorials

---
 docs/tutorial_basics.rst             | 96 ++++++++++++++++------------
 tutorials/basics/hello/hello1.py     |  6 +-
 tutorials/basics/hello/hello2.py     |  6 +-
 tutorials/basics/hellomp/hellomp1.py |  6 +-
 tutorials/basics/hellomp/hellomp2.py | 10 +--
 tutorials/basics/hellomp/hellomp3.py | 10 +--
 tutorials/basics/stream/stream1.py   | 36 ++++++-----
 tutorials/basics/stream/stream2.py   | 33 ++++++----
 tutorials/basics/stream/stream3.py   | 31 +++++----
 9 files changed, 130 insertions(+), 104 deletions(-)

diff --git a/docs/tutorial_basics.rst b/docs/tutorial_basics.rst
index b0cb6a698e..b5244a4010 100644
--- a/docs/tutorial_basics.rst
+++ b/docs/tutorial_basics.rst
@@ -67,12 +67,10 @@ In this particular test we set both these attributes to ``['*']``, essentially a

A ReFrame test must either define an executable to execute or a source file (or source code) to be compiled.
In this example, it is enough to define the source file of our hello program.
ReFrame knows the executable that was produced and will use that to run the test.

-In this example, we redirect the executable's output into a file by defining the optional variable :attr:`~reframe.core.pipeline.RegressionTest.executable_opts`.
-This output redirection is not strictly necessary and it is just done here to keep this first example as intuitive as possible.
-Finally, each regression test must always define the :attr:`~reframe.core.pipeline.RegressionTest.sanity_patterns` attribute.
-This is a `lazily evaluated `__ expression that asserts the sanity of the test.
-In this particular case, we ask ReFrame to check that the executable has produced the desired phrase into the output file ``hello.out``.
+Finally, every regression test must decorate one of its member functions as the test's :func:`@sanity_function`.
+This decorated function is converted into a `lazily evaluated `__ expression that asserts the sanity of the test.
+In this particular case, the specified sanity function checks that the executable has produced the desired phrase in the test's standard output :attr:`~reframe.core.pipeline.RegressionTest.stdout`.
Note that ReFrame does not determine the success of a test by its exit code.
Instead, the assessment of success is the responsibility of the test itself.

@@ -233,7 +231,7 @@ ReFrame allows you to avoid this in several ways but the most compact is to defi
   :lines: 6-

-This test extends the ``hello1.py`` test by defining the ``lang`` parameter with the :py:func:`~reframe.core.pipeline.RegressionTest.parameter` built-in.
+This test extends the ``hello1.py`` test by defining the ``lang`` parameter with the :py:func:`~reframe.core.pipeline.RegressionMixin.parameter` built-in.
This parameter will generate as many test instantiations as there are parameter values, each one setting the :attr:`lang` attribute to a single value.
Hence, this example will create two test instances, one with ``lang='c'`` and another with ``lang='cpp'``.
The parameter is available as an attribute of the test instance and, in this example, we use it to set the extension of the source file.

@@ -254,7 +252,7 @@ This is exactly what we want to do here, and we know that the test sources are n
Hence, we move the ``sourcepath`` assignment into a pre-compile hook.
.. literalinclude:: ../tutorials/basics/hello/hello2.py
-   :lines: 19-
+   :lines: 17-19

The use of hooks is covered in more detail later on, but for now, let's just think of them as a way to defer the execution of a function to a given stage of the test's pipeline.
By using hooks, any user could now derive from this class and attach other hooks (for example, adding some compiler flags) without having to worry about overriding the base method that sets the ``sourcepath`` variable.

@@ -469,15 +467,8 @@ In this case, we set the :func:`set_compile_flags` hook to run before the ReFram

The pipeline hooks, as well as the regression test pipeline itself, are covered in more detail later on in the tutorial.

-In this example, the generated executable takes a single argument which sets the number of threads that will be used.
-As seen in the previous examples, executable options are defined with the :attr:`executable_opts ` variable, and here is set to ``'16'``.
-Also, the reader may notice that this example no longer redirects the standard output of the executable into a file as the previous examples did.
-Instead, just with the purpose of keeping the :attr:`executable_opts ` simple, we use ReFrame's internal mechanism to process the standard output of the executable.
-Similarly to the parameters and the compiler settings, the output of a test is private to each of the instances of the :class:`HelloThreadedTest` class.
-So, instead of inspecting an external file to evaluate the sanity of the test, we can just set our sanity function to inspect this attribute that contains the test's standard output.
-This output is stored under :attr:`self.stdout` and is populated only after the executable has run.
-Therefore, we can set the :attr:`~reframe.core.pipeline.RegressionTest.sanity_patterns` with the :func:`set_sanity_patterns` pipeline hook that is scheduled to run before the ``sanity`` pipeline stage.
-Again, pipeline stages will be covered detail further on, so for now, just think of this ``sanity`` stage as a step that occurs after the test's executable is run.
+In this example, the generated executable takes a single argument which sets the number of threads to be used.
+The options passed to the test's executable can be set throught the :attr:`executable_opts ` variable, which in this case is set to ``'16'``.

Let's run the test now:

@@ -552,12 +543,10 @@ In the following we write a more robust sanity check that can catch this havoc.
More advanced sanity checking
-----------------------------

-Sanity checking of a test's outcome is quite powerful in ReFrame.
-So far, we have seen only a ``grep``-like search for a string in the output, but ReFrame's ``sanity_patterns`` are much more capable than this.
-In fact, you can practically do almost any operation in the output and process it as you would like before assessing the test's sanity.
-The syntax feels also quite natural since it is fully integrated in Python.
-
-In the following we extend the sanity checking of the multithreaded "Hello, World!", such that not only the output pattern we are looking for is more restrictive, but also we check that all the threads produce a greetings line. See the highlighted lines in the modified version of the ``set_sanity_patterns`` pipeline hook.
+So far, we have seen only a ``grep``-like search for a string in the test's :attr:`~reframe.core.pipeline.RegressionTest.stdout`, but ReFrame's :attr:`@sanity_function` is much more capable than this.
+In fact, you can perform almost any operation on the output and process it as you like before assessing the test's sanity.
+In the following, we extend the sanity checking of the above multithreaded "Hello, World!" to assert that all the threads produce a greetings line.
+See the highlighted lines below in the modified version of the :attr:`@sanity_function`.

.. code-block:: console


   :lines: 6-
   :emphasize-lines: 22-24

-The sanity checking is straightforward.
-We find all the matches of the required pattern, we count them and finally we check their number.
-Both statements here are lazily evaluated.
-They will not be executed where they appear, but rather at the sanity checking phase.
-ReFrame provides lazily evaluated counterparts for most of the builtin Python functions, such the :func:`len` function here.
-Also whole expressions can be lazily evaluated if one of the operands is deferred, as is the case in this example with the assignment to ``num_messages``.
-This makes the sanity checking mechanism quite powerful and straightforward to reason about, without having to rely on complex pattern matching techniques.
-:doc:`deferrable_functions_reference` provides a complete reference of the sanity functions provided by ReFrame, but users can also define their own, as described in :doc:`deferrables`.
-
+This new :attr:`@sanity_function` counts all the pattern matches in the test's :attr:`~reframe.core.pipeline.RegressionTest.stdout` and checks that this count matches the expected value.
+The execution of the function :func:`assert_num_messages` is deferred to the ``sanity`` stage in the test's pipeline, after the executable has run and the :attr:`~reframe.core.pipeline.RegressionTest.stdout` file has been populated.
+In this example, we have used the :func:`~reframe.utility.sanity.findall` utility function from the :mod:`~reframe.utility.sanity` module to conveniently extract the pattern matches.
+This module provides a broad range of utility functions that can be used to compose more complex sanity checks.
+However, note that these utility functions return lazily evaluated expressions, or `deferred expressions`, which must be evaluated either implicitly or explicitly (see :doc:`deferrable_functions_reference`).

Let's run this version of the test now and see if it fails:

@@ -659,7 +644,7 @@ Writing A Performance Test
--------------------------

An important aspect of regression testing is checking for performance regressions.
-In this example, we will write a test that downloads the `STREAM `__ benchmark, compiles it, runs it and records its performance.
+In this example, we write a test that downloads the `STREAM `__ benchmark, compiles it, runs it and records its performance.
In the test below, we highlight the lines that introduce new concepts.

.. code-block:: console

.. literalinclude:: ../tutorials/basics/stream/stream1.py
   :lines: 6-
-   :emphasize-lines: 9-11,14-17,29-40
+   :emphasize-lines: 9-11,14-17,28-

First of all, notice that we restrict the programming environments to ``gnu`` only, since this test requires OpenMP, which our installation of Clang does not have.
The next thing to notice is the :attr:`~reframe.core.pipeline.RegressionTest.prebuild_cmds` attribute, which provides a list of commands to be executed before the build step.
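To make the role of :attr:`prebuild_cmds` concrete, the sketch below shows how a test could fetch its source file right before compiling. This is only a hedged illustration: the download URL, file name and success marker are made up and not part of this tutorial.

.. code-block:: python

   import reframe as rfm
   import reframe.utility.sanity as sn


   class FetchAndBuildTest(rfm.RegressionTest):
       valid_systems = ['*']
       valid_prog_environs = ['gnu']
       sourcesdir = None   # the test ships no resources of its own
       # Each command runs in the stage directory just before the build step;
       # the URL and file name below are hypothetical.
       prebuild_cmds = [
           'wget https://example.org/benchmarks/mybench.c'
       ]
       sourcepath = 'mybench.c'

       @sanity_function
       def validate(self):
           # 'PASSED' is a made-up success marker for this sketch
           return sn.assert_found(r'PASSED', self.stdout)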
@@ -678,9 +663,13 @@ In this case, we just fetch the source code of the benchmark.

For running the benchmark, we need to set the OpenMP number of threads and pin them to the right CPUs through the ``OMP_NUM_THREADS`` and ``OMP_PLACES`` environment variables.
You can set environment variables in a ReFrame test through the :attr:`~reframe.core.pipeline.RegressionTest.variables` dictionary.

-What makes a ReFrame test a performance test is the definition of the :attr:`~reframe.core.pipeline.RegressionTest.perf_patterns` attribute.
-This is a dictionary where the keys are *performance variables* and the values are lazily evaluated expressions for extracting the performance variable values from the test's output.
-In this example, we extract four performance variables, namely the memory bandwidth values for each of the "Copy", "Scale", "Add" and "Triad" sub-benchmarks of STREAM and we do so by using the :func:`~reframe.utility.sanity.extractsingle` sanity function.
+What makes a ReFrame test a performance test is the definition of at least one :ref:`performance function`.
+Similarly to a test's :func:`@sanity_function`, a performance function is simply a member function decorated with the :attr:`@performance_function` decorator, which is responsible for extracting a specified performance quantity from a regression test.
+The :attr:`@performance_function` decorator must be passed the units of the quantity to be extracted, and it also takes the optional argument ``perf_key`` to customize the name of the extracted performance variable.
+If ``perf_key`` is not provided, the performance variable will take the name of the decorated performance function.
+
+ReFrame identifies all member functions that use the :attr:`@performance_function` decorator, and will automatically schedule them for execution during the ``performance`` pipeline stage of the test.
+In this example, we extract four performance variables, namely the memory bandwidth values for each of the "Copy", "Scale", "Add" and "Triad" sub-benchmarks of STREAM, where each of the performance functions uses the :func:`~reframe.utility.sanity.extractsingle` utility function.
For each of the sub-benchmarks we extract the "Best Rate MB/s" column of the output (see below) and we convert that to a float.

.. code-block:: none

@@ -734,10 +723,10 @@ The :option:`--performance-report` will generate a short report at the end for e

  - catalina:default
     - gnu
        * num_tasks: 1
-        * Copy: 24326.7 None
-        * Scale: 16664.2 None
-        * Add: 18398.7 None
-        * Triad: 18930.6 None
+        * Copy: 24326.7 MB/s
+        * Scale: 16664.2 MB/s
+        * Add: 18398.7 MB/s
+        * Triad: 18930.6 MB/s
 ------------------------------------------------------------------------------
 Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-gczplnic.log'

@@ -746,8 +735,9 @@ The :option:`--performance-report` will generate a short report at the end for e
Adding reference values
-----------------------

-A performance test would not be so meaningful, if we couldn't test the obtained performance against a reference value.
-ReFrame offers the possibility to set references for each of the performance variables defined in a test and also set different references for different systems.
+In its current state, the above STREAM performance test will simply extract and report the performance variables regardless of the actual performance values.
+However, in some situations, it might be useful to check that the extracted performance values are within an expected range, and report a failure whenever a test performs below expectations.
+To this end, ReFrame tests include the :attr:`~reframe.core.pipeline.RegressionTest.reference` variable, which enables setting references for each of the performance variables defined in a test and also allows setting different references for different systems.
In the following example, we set the reference values for all the STREAM sub-benchmarks for the system we are currently running on.

.. note::

@@ -789,6 +779,28 @@ If any obtained performance value is beyond its respective thresholds, the test

  * Rerun with '-n StreamWithRefTest -p gnu --system catalina:default'
  * Reason: performance error: failed to meet reference: Copy=24586.5, expected 55200 (l=52440.0, u=57960.0)

+Also, note how the syntax for defining the performance variables in this example is far more compact than in our first iteration of the STREAM test.
+In that first STREAM example, all four performance functions were almost identical, except for a small part of the regex pattern, which led to a lot of code repetition.
+Hence, this example collapses all four performance functions into a single performance function, which now takes an optional argument to select the quantity to extract.
+Then, the performance variables of the test can be defined by setting the respective entries in the :attr:`~reframe.core.pipeline.RegressionTest.perf_variables` dictionary.

.. literalinclude:: ../tutorials/basics/stream/stream2.py
   :lines: 41-

.. note::
   Performance functions may also be generated inline using the :func:`~reframe.utility.sanity.make_performance_function` utility as shown below.

   .. code-block:: python

      @run_before('performance')
      def set_perf_vars(self):
          self.perf_variables = {
              'Copy': sn.make_performance_function(
                  sn.extractsingle(r'Copy:\s+(\S+)\s+.*',
                                   self.stdout, 1, float),
                  'MB/s'
              )
          }

------------------------------

diff --git a/tutorials/basics/hello/hello1.py b/tutorials/basics/hello/hello1.py
index c18d8c9a9d..f8d88e804f 100644
--- a/tutorials/basics/hello/hello1.py
+++ b/tutorials/basics/hello/hello1.py
@@ -12,5 +12,7 @@ class HelloTest(rfm.RegressionTest):
     valid_systems = ['*']
     valid_prog_environs = ['*']
     sourcepath = 'hello.c'
-    executable_opts = ['> hello.out']
-    sanity_patterns = sn.assert_found(r'Hello, World\!', 'hello.out')
+
+    @sanity_function
+    def assert_hello(self):
+        return sn.assert_found(r'Hello, World\!', self.stdout)

diff --git a/tutorials/basics/hello/hello2.py b/tutorials/basics/hello/hello2.py
index 3d7aaf4dc3..a3208bb241 100644
--- a/tutorials/basics/hello/hello2.py
+++ b/tutorials/basics/hello/hello2.py
@@ -13,9 +13,11 @@ class HelloMultiLangTest(rfm.RegressionTest):
     valid_systems = ['*']
     valid_prog_environs = ['*']
-    executable_opts = ['> hello.out']
-    sanity_patterns = sn.assert_found(r'Hello, World\!', 'hello.out')

     @run_before('compile')
     def set_sourcepath(self):
         self.sourcepath = f'hello.{self.lang}'
+
+    @sanity_function
+    def assert_hello(self):
+        return sn.assert_found(r'Hello, World\!', self.stdout)

diff --git a/tutorials/basics/hellomp/hellomp1.py b/tutorials/basics/hellomp/hellomp1.py
index bfb3632262..561ee2e25d 100644
--- a/tutorials/basics/hellomp/hellomp1.py
+++ b/tutorials/basics/hellomp/hellomp1.py
@@ -22,6 +22,6 @@ def set_compilation_flags(self):
         if environ in {'clang', 'gnu'}:
             self.build_system.cxxflags += ['-pthread']

-    @run_before('sanity')
-    def set_sanity_patterns(self):
-    
self.sanity_patterns = sn.assert_found(r'Hello, World\!', self.stdout) + @sanity_function + def assert_hello(self): + return sn.assert_found(r'Hello, World\!', self.stdout) diff --git a/tutorials/basics/hellomp/hellomp2.py b/tutorials/basics/hellomp/hellomp2.py index 48fbdd66c2..6dcbbe7386 100644 --- a/tutorials/basics/hellomp/hellomp2.py +++ b/tutorials/basics/hellomp/hellomp2.py @@ -22,8 +22,8 @@ def set_compilation_flags(self): if environ in {'clang', 'gnu'}: self.build_system.cxxflags += ['-pthread'] - @run_before('sanity') - def set_sanity_patterns(self): - num_messages = sn.len(sn.findall(r'\[\s?\d+\] Hello, World\!', - self.stdout)) - self.sanity_patterns = sn.assert_eq(num_messages, 16) + @sanity_function + def assert_num_messages(self): + num_messages = len(sn.findall(r'\[\s?\d+\] Hello, World\!', + self.stdout)) + return num_messages == 16 diff --git a/tutorials/basics/hellomp/hellomp3.py b/tutorials/basics/hellomp/hellomp3.py index bc91e6efb8..956f1906fd 100644 --- a/tutorials/basics/hellomp/hellomp3.py +++ b/tutorials/basics/hellomp/hellomp3.py @@ -23,8 +23,8 @@ def set_compilation_flags(self): if environ in {'clang', 'gnu'}: self.build_system.cxxflags += ['-pthread'] - @run_before('sanity') - def set_sanity_patterns(self): - num_messages = sn.len(sn.findall(r'\[\s?\d+\] Hello, World\!', - self.stdout)) - self.sanity_patterns = sn.assert_eq(num_messages, 16) + @sanity_function + def assert_num_messages(self): + num_messages = len(sn.findall(r'\[\s?\d+\] Hello, World\!', + self.stdout)) + return num_messages == 16 diff --git a/tutorials/basics/stream/stream1.py b/tutorials/basics/stream/stream1.py index fd881fd1ad..1dc5a6c36a 100644 --- a/tutorials/basics/stream/stream1.py +++ b/tutorials/basics/stream/stream1.py @@ -26,20 +26,22 @@ def set_compiler_flags(self): self.build_system.cppflags = ['-DSTREAM_ARRAY_SIZE=$((1 << 25))'] self.build_system.cflags = ['-fopenmp', '-O3', '-Wall'] - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_found(r'Solution Validates', - self.stdout) - - @run_before('performance') - def set_perf_patterns(self): - self.perf_patterns = { - 'Copy': sn.extractsingle(r'Copy:\s+(\S+)\s+.*', - self.stdout, 1, float), - 'Scale': sn.extractsingle(r'Scale:\s+(\S+)\s+.*', - self.stdout, 1, float), - 'Add': sn.extractsingle(r'Add:\s+(\S+)\s+.*', - self.stdout, 1, float), - 'Triad': sn.extractsingle(r'Triad:\s+(\S+)\s+.*', - self.stdout, 1, float) - } + @sanity_function + def validate_solution(self): + return sn.assert_found(r'Solution Validates', self.stdout) + + @performance_function('MB/s', perf_key='Copy') + def extract_copy_perf(self): + return sn.extractsingle(r'Copy:\s+(\S+)\s+.*', self.stdout, 1, float) + + @performance_function('MB/s', perf_key='Scale') + def extract_scale_perf(self): + return sn.extractsingle(r'Scale:\s+(\S+)\s+.*', self.stdout, 1, float) + + @performance_function('MB/s', perf_key='Add') + def extract_add_perf(self): + return sn.extractsingle(r'Add:\s+(\S+)\s+.*', self.stdout, 1, float) + + @performance_function('MB/s', perf_key='Triad') + def extract_triad_perf(self): + return sn.extractsingle(r'Triad:\s+(\S+)\s+.*', self.stdout, 1, float) diff --git a/tutorials/basics/stream/stream2.py b/tutorials/basics/stream/stream2.py index 678ecd20e6..52aa740bd2 100644 --- a/tutorials/basics/stream/stream2.py +++ b/tutorials/basics/stream/stream2.py @@ -34,20 +34,25 @@ def set_compiler_flags(self): self.build_system.cppflags = ['-DSTREAM_ARRAY_SIZE=$((1 << 25))'] self.build_system.cflags = ['-fopenmp', '-O3', 
'-Wall'] - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_found(r'Solution Validates', - self.stdout) + @sanity_function + def validate_solution(self): + return sn.assert_found(r'Solution Validates', self.stdout) + + @performance_function('MB/s') + def extract_bw(self, kind='Copy'): + '''Generic performance extraction function.''' + if kind not in {'Copy', 'Scale', 'Add', 'Triad'}: + raise ValueError(f'illegal value in argument kind ({kind!r})') + + return sn.extractsingle(rf'{kind}:\s+(\S+)\s+.*', + self.stdout, 1, float) @run_before('performance') - def set_perf_patterns(self): - self.perf_patterns = { - 'Copy': sn.extractsingle(r'Copy:\s+(\S+)\s+.*', - self.stdout, 1, float), - 'Scale': sn.extractsingle(r'Scale:\s+(\S+)\s+.*', - self.stdout, 1, float), - 'Add': sn.extractsingle(r'Add:\s+(\S+)\s+.*', - self.stdout, 1, float), - 'Triad': sn.extractsingle(r'Triad:\s+(\S+)\s+.*', - self.stdout, 1, float) + def set_perf_variables(self): + '''Build the dictionary with all the performance variables.''' + self.perf_variables = { + 'Copy': self.extract_bw(), + 'Scale': self.extract_bw('Scale'), + 'Add': self.extract_bw('Add'), + 'Triad': self.extract_bw('Triad'), } diff --git a/tutorials/basics/stream/stream3.py b/tutorials/basics/stream/stream3.py index 285d12adcf..a1a3cc5b5e 100644 --- a/tutorials/basics/stream/stream3.py +++ b/tutorials/basics/stream/stream3.py @@ -60,20 +60,23 @@ def set_num_threads(self): 'OMP_PLACES': 'cores' } - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_found(r'Solution Validates', - self.stdout) + @sanity_function + def validate_solution(self): + return sn.assert_found(r'Solution Validates', self.stdout) + + @performance_function('MB/s') + def extract_bw(self, kind='Copy'): + if kind not in {'Copy', 'Scale', 'Add', 'Triad'}: + raise ValueError(f'illegal value in argument kind ({kind!r})') + + return sn.extractsingle(rf'{kind}:\s+(\S+)\s+.*', + self.stdout, 1, float) @run_before('performance') - def set_perf_patterns(self): - self.perf_patterns = { - 'Copy': sn.extractsingle(r'Copy:\s+(\S+)\s+.*', - self.stdout, 1, float), - 'Scale': sn.extractsingle(r'Scale:\s+(\S+)\s+.*', - self.stdout, 1, float), - 'Add': sn.extractsingle(r'Add:\s+(\S+)\s+.*', - self.stdout, 1, float), - 'Triad': sn.extractsingle(r'Triad:\s+(\S+)\s+.*', - self.stdout, 1, float) + def set_perf_variables(self): + self.perf_variables = { + 'Copy': self.extract_bw(), + 'Scale': self.extract_bw('Scale'), + 'Add': self.extract_bw('Add'), + 'Triad': self.extract_bw('Triad'), } From bd2e562822f3f50ec8eefa36708a1dc08ffa9472 Mon Sep 17 00:00:00 2001 From: Javier Otero <71280927+jjotero@users.noreply.github.com> Date: Mon, 30 Aug 2021 16:46:37 +0200 Subject: [PATCH 02/11] Update advanced tutorials --- docs/tutorial_advanced.rst | 32 ++++++++----------- tutorials/advanced/affinity/affinity.py | 6 ++-- .../advanced/containers/container_test.py | 8 +++-- tutorials/advanced/flexnodes/flextest.py | 8 ++--- tutorials/advanced/jobopts/eatmemory.py | 12 +++---- tutorials/advanced/library/lib/__init__.py | 6 ++-- tutorials/advanced/makefiles/maketest.py | 14 ++++---- .../advanced/makefiles/maketest_mixin.py | 12 +++---- tutorials/advanced/multilaunch/multilaunch.py | 6 ++-- tutorials/advanced/parameterized/stream.py | 16 ++++------ tutorials/advanced/random/prepostrun.py | 6 ++-- tutorials/advanced/random/randint.py | 6 ++-- tutorials/advanced/runonly/echorand.py | 6 ++-- 13 files changed, 65 insertions(+), 73 deletions(-) diff 
--git a/docs/tutorial_advanced.rst b/docs/tutorial_advanced.rst
index a5c72b97d8..e9477527fc 100644
--- a/docs/tutorial_advanced.rst
+++ b/docs/tutorial_advanced.rst
@@ -135,8 +135,8 @@ Let's have a look at the test itself:

 .. literalinclude:: ../tutorials/advanced/makefiles/maketest.py
-   :lines: 6-29
-   :emphasize-lines: 18,22-24
+   :lines: 6-27
+   :emphasize-lines: 18,22

First, if you're using any build system other than ``SingleSource``, you must set the :attr:`executable` attribute of the test, because ReFrame cannot know the actual executable to be run.
We then set the build system to :class:`~reframe.core.buildsystems.Make` and set the preprocessor flags as we would do with the :class:`SingleSource` build system.

@@ -281,7 +281,7 @@ The following test is a compile-only version of the :class:`MakefileTest` presen

 .. literalinclude:: ../tutorials/advanced/makefiles/maketest.py
-   :lines: 32-
+   :lines: 30-
   :emphasize-lines: 2

What is worth noting here is that the standard output and standard error of the test, which are accessible through the :attr:`~reframe.core.pipeline.RegressionTest.stdout` and :attr:`~reframe.core.pipeline.RegressionTest.stderr` attributes, correspond now to the standard output and error of the compilation command.

@@ -337,7 +337,7 @@ Notice how the parameters are expanded in each of the individual tests:
Applying a Sanity Function Iteratively
--------------------------------------

-It is often the case that a common sanity pattern has to be applied many times.
+It is often the case that a common sanity function has to be applied many times.
The following script prints 100 random integers between the limits given by the environment variables ``LOWER`` and ``UPPER``.

.. code-block:: console

@@ -372,7 +372,7 @@ There is still a small complication that needs to be addressed.
As a direct replacement of the built-in :py:func:`all` function, ReFrame's :func:`~reframe.utility.sanity.all` sanity function returns :class:`True` for empty iterables, which is not what we want.
So we must make sure that all 100 numbers are generated.
This is achieved by the ``sn.assert_eq(sn.count(numbers), 100)`` statement, which uses the :func:`~reframe.utility.sanity.count` sanity function for counting the generated numbers.
-Finally, we need to combine these two conditions to a single deferred expression that will be assigned to the test's :attr:`sanity_patterns`.
+Finally, we need to combine these two conditions into a single deferred expression that will be returned by the test's :attr:`@sanity_function`.
We accomplish this by using the :func:`~reframe.utility.sanity.all` sanity function.
For more information about how exactly sanity functions work and how their execution is deferred, please refer to :doc:`deferrables`.

@@ -458,7 +458,7 @@ Here is the test:

 .. literalinclude:: ../tutorials/advanced/jobopts/eatmemory.py
-   :lines: 6-23
+   :lines: 6-25
   :emphasize-lines: 12-14

Each ReFrame test has an associated `run job descriptor `__ which represents the scheduler job that will be used to run this test.

@@ -625,7 +625,7 @@ It resembles a scaling test, except that all happens inside a single ReFrame tes

 .. literalinclude:: ../tutorials/advanced/multilaunch/multilaunch.py
   :lines: 6-
-   :emphasize-lines: 12-19
+   :emphasize-lines: 13-19

The additional parallel launch commands are inserted in either the :attr:`prerun_cmds` or :attr:`postrun_cmds` lists.
To retrieve the actual parallel launch command for the current partition that the test is running on, you can use the :func:`~reframe.core.launchers.Launcher.run_command` method of the launcher object.

@@ -680,13 +680,9 @@ The test will verify that all the nodes print the expected host name:
   :lines: 6-
   :emphasize-lines: 10-

-The first thing to notice in this test is that :attr:`~reframe.core.pipeline.RegressionTest.num_tasks` is set to zero.
-This is a requirement for flexible tests.
-The sanity check of this test simply counts the host names printed and verifies that they are as many as expected.
-Notice, however, that the sanity check does not use :attr:`num_tasks` directly, but rather access the attribute through the :func:`~reframe.utility.sanity.getattr` sanity function, which is a replacement for the :func:`getattr` builtin.
-The reason for that is that at the time the sanity check expression is created, :attr:`num_tasks` is ``0`` and it will only be set to its actual value during the run phase.
-Consequently, we need to defer the attribute retrieval, thus we use the :func:`~reframe.utility.sanity.getattr` sanity function instead of accessing it directly
-
+The first thing to notice in this test is that :attr:`~reframe.core.pipeline.RegressionTest.num_tasks` is set to zero by default, which is a requirement for flexible tests.
+However, this value is set to the actual number of tasks during the ``run`` pipeline stage.
+Lastly, the sanity check of this test counts the host names printed and verifies that the total count equals :attr:`~reframe.core.pipeline.RegressionTest.num_tasks`.

.. |--flex-alloc-nodes| replace:: :attr:`--flex-alloc-nodes`
.. _--flex-alloc-nodes: manpage.html#cmdoption-flex-alloc-nodes

@@ -723,7 +719,7 @@ The following parameterized test, will create two tests, one for each of the sup

 .. literalinclude:: ../tutorials/advanced/containers/container_test.py
   :lines: 6-
-   :emphasize-lines: 16-22
+   :emphasize-lines: 11-19

A container-based test can be written as :class:`~reframe.core.pipeline.RunOnlyRegressionTest` that sets the :attr:`~reframe.core.pipeline.RegressionTest.container_platform` attribute.
This attribute accepts a string that corresponds to the name of the container platform that will be used to run the container for this test.

@@ -780,7 +776,7 @@ and ``/rfm_workdir`` corresponds to the stage directory on the host system.
Therefore, the ``release.txt`` file can now be used in the subsequent sanity checks:

 .. literalinclude:: ../tutorials/advanced/containers/container_test.py
-   :lines: 15-17
+   :lines: 26-29

For a complete list of the available attributes of a specific container platform, please have a look at the :ref:`container-platforms` section of the :doc:`regression_test_api` guide.

@@ -793,8 +789,8 @@ Writing reusable tests
----------------------

.. versionadded:: 3.5.0

So far, all the examples shown above were tied to a particular system or configuration, which makes reusing these tests in other systems not straightforward.
-However, the introduction of the :py:func:`~reframe.core.pipeline.RegressionTest.parameter` and :py:func:`~reframe.core.pipeline.RegressionTest.variable` ReFrame built-ins solves this problem, eliminating the need to specify any of the test variables in the :func:`__init__` method and simplifying code reuse.
-Hence, readers who are not familiar with these built-in functions are encouraged to read their basic use examples (see :py:func:`~reframe.core.pipeline.RegressionTest.parameter` and :py:func:`~reframe.core.pipeline.RegressionTest.variable`) before delving any deeper into this tutorial. +However, the introduction of the :py:func:`~reframe.core.pipeline.RegressionMixin.parameter` and :py:func:`~reframe.core.pipeline.RegressionMixin.variable` ReFrame built-ins solves this problem, eliminating the need to specify any of the test variables in the :func:`__init__` method and simplifying code reuse. +Hence, readers who are not familiar with these built-in functions are encouraged to read their basic use examples (see :py:func:`~reframe.core.pipeline.RegressionMixin.parameter` and :py:func:`~reframe.core.pipeline.RegressionMixin.variable`) before delving any deeper into this tutorial. In essence, parameters and variables can be treated as simple class attributes, which allows us to leverage Python's class inheritance and write more modular tests. For simplicity, we illustrate this concept with the above :class:`ContainerTest` example, where the goal here is to re-write this test as a library that users can simply import from and derive their tests without having to rewrite the bulk of the test. diff --git a/tutorials/advanced/affinity/affinity.py b/tutorials/advanced/affinity/affinity.py index 4550c86713..00ee73f0db 100644 --- a/tutorials/advanced/affinity/affinity.py +++ b/tutorials/advanced/affinity/affinity.py @@ -23,6 +23,6 @@ def set_build_system_options(self): def set_cpu_binding(self): self.job.launcher.options = ['--cpu-bind=cores'] - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_found(r'CPU affinity', self.stdout) + @sanity_function + def validate_test(self): + return sn.assert_found(r'CPU affinity', self.stdout) diff --git a/tutorials/advanced/containers/container_test.py b/tutorials/advanced/containers/container_test.py index 821ecd82fb..def8e748df 100644 --- a/tutorials/advanced/containers/container_test.py +++ b/tutorials/advanced/containers/container_test.py @@ -13,9 +13,6 @@ class ContainerTest(rfm.RunOnlyRegressionTest): valid_systems = ['daint:gpu'] valid_prog_environs = ['builtin'] - os_release_pattern = r'18.04.\d+ LTS \(Bionic Beaver\)' - sanity_patterns = sn.assert_found(os_release_pattern, 'release.txt') - @run_before('run') def set_container_variables(self): self.descr = f'Run commands inside a container using {self.platform}' @@ -25,3 +22,8 @@ def set_container_variables(self): self.container_platform.command = ( "bash -c 'cat /etc/os-release | tee /rfm_workdir/release.txt'" ) + + @sanity_function + def assert_release(self): + os_release_pattern = r'18.04.\d+ LTS \(Bionic Beaver\)' + return sn.assert_found(os_release_pattern, 'release.txt') diff --git a/tutorials/advanced/flexnodes/flextest.py b/tutorials/advanced/flexnodes/flextest.py index 3fd002b454..a32f03ba0a 100644 --- a/tutorials/advanced/flexnodes/flextest.py +++ b/tutorials/advanced/flexnodes/flextest.py @@ -15,9 +15,9 @@ class HostnameCheck(rfm.RunOnlyRegressionTest): num_tasks = 0 num_tasks_per_node = 1 - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_eq( - sn.getattr(self, 'num_tasks'), + @sanity_function + def validate_test(self): + return sn.assert_eq( + self.num_tasks, sn.count(sn.findall(r'^nid\d+$', self.stdout)) ) diff --git a/tutorials/advanced/jobopts/eatmemory.py b/tutorials/advanced/jobopts/eatmemory.py index 
81753058eb..84464943dc 100644 --- a/tutorials/advanced/jobopts/eatmemory.py +++ b/tutorials/advanced/jobopts/eatmemory.py @@ -18,9 +18,9 @@ class MemoryLimitTest(rfm.RegressionTest): def set_memory_limit(self): self.job.options = ['--mem=1000'] - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_found( + @sanity_function + def validate_test(self): + return sn.assert_found( r'(exceeded memory limit)|(Out Of Memory)', self.stderr ) @@ -35,8 +35,8 @@ class MemoryLimitWithResourcesTest(rfm.RegressionTest): 'memory': {'size': '1000'} } - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_found( + @sanity_function + def validate_test(self): + return sn.assert_found( r'(exceeded memory limit)|(Out Of Memory)', self.stderr ) diff --git a/tutorials/advanced/library/lib/__init__.py b/tutorials/advanced/library/lib/__init__.py index 3c3b7cf651..c50927cefb 100644 --- a/tutorials/advanced/library/lib/__init__.py +++ b/tutorials/advanced/library/lib/__init__.py @@ -42,9 +42,9 @@ def os_release_pattern(self): name = self.dist_name[self.dist] return rf'{self.dist}.\d+ LTS \({name}\)' - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.all([ + @sanity_function + def assert_release(self): + return sn.all([ sn.assert_found(self.os_release_pattern, 'release.txt'), sn.assert_found(self.os_release_pattern, self.stdout) ]) diff --git a/tutorials/advanced/makefiles/maketest.py b/tutorials/advanced/makefiles/maketest.py index ebf617eae6..9f8db78c92 100644 --- a/tutorials/advanced/makefiles/maketest.py +++ b/tutorials/advanced/makefiles/maketest.py @@ -22,11 +22,9 @@ class MakefileTest(rfm.RegressionTest): def set_compiler_flags(self): self.build_system.cppflags = [f'-DELEM_TYPE={self.elem_type}'] - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_found( - rf'Result \({self.elem_type}\):', self.stdout - ) + @sanity_function + def validate_test(self): + return sn.assert_found(rf'Result \({self.elem_type}\):', self.stdout) @rfm.simple_test @@ -41,6 +39,6 @@ class MakeOnlyTest(rfm.CompileOnlyRegressionTest): def set_compiler_flags(self): self.build_system.cppflags = [f'-DELEM_TYPE={self.elem_type}'] - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_not_found(r'warning', self.stdout) + @sanity_function + def validate_compilation(self): + return sn.assert_not_found(r'warning', self.stdout) diff --git a/tutorials/advanced/makefiles/maketest_mixin.py b/tutorials/advanced/makefiles/maketest_mixin.py index 04968a0a7d..125cf18b30 100644 --- a/tutorials/advanced/makefiles/maketest_mixin.py +++ b/tutorials/advanced/makefiles/maketest_mixin.py @@ -24,9 +24,9 @@ class MakefileTestAlt(rfm.RegressionTest, ElemTypeParam): def set_compiler_flags(self): self.build_system.cppflags = [f'-DELEM_TYPE={self.elem_type}'] - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_found( + @sanity_function + def validate_test(self): + return sn.assert_found( rf'Result \({self.elem_type}\):', self.stdout ) @@ -42,6 +42,6 @@ class MakeOnlyTestAlt(rfm.CompileOnlyRegressionTest, ElemTypeParam): def set_compiler_flags(self): self.build_system.cppflags = [f'-DELEM_TYPE={self.elem_type}'] - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_not_found(r'warning', self.stdout) + @sanity_function + def validate_build(self): + return sn.assert_not_found(r'warning', self.stdout) diff --git 
a/tutorials/advanced/multilaunch/multilaunch.py b/tutorials/advanced/multilaunch/multilaunch.py index a7abfa2892..74f0776e4d 100644 --- a/tutorials/advanced/multilaunch/multilaunch.py +++ b/tutorials/advanced/multilaunch/multilaunch.py @@ -23,8 +23,8 @@ def pre_launch(self): for n in range(1, self.num_tasks) ] - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_eq( + @sanity_function + def validate_test(self): + return sn.assert_eq( sn.count(sn.extractall(r'^nid\d+', self.stdout)), 10 ) diff --git a/tutorials/advanced/parameterized/stream.py b/tutorials/advanced/parameterized/stream.py index 28715be17f..2126fb1e31 100644 --- a/tutorials/advanced/parameterized/stream.py +++ b/tutorials/advanced/parameterized/stream.py @@ -71,14 +71,10 @@ def set_num_threads(self): 'OMP_PLACES': 'cores' } - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_found(r'Solution Validates', - self.stdout) + @sanity_function + def validate_solution(self): + return sn.assert_found(r'Solution Validates', self.stdout) - @run_before('performance') - def set_perf_patterns(self): - self.perf_patterns = { - 'Triad': sn.extractsingle(r'Triad:\s+(\S+)\s+.*', - self.stdout, 1, float), - } + @performance_function('MB/s', perf_key='Triad') + def extract_triad_bw(self): + return sn.extractsingle(r'Triad:\s+(\S+)\s+.*', self.stdout, 1, float) diff --git a/tutorials/advanced/random/prepostrun.py b/tutorials/advanced/random/prepostrun.py index 8634d767c1..77266ea9d2 100644 --- a/tutorials/advanced/random/prepostrun.py +++ b/tutorials/advanced/random/prepostrun.py @@ -16,12 +16,12 @@ class PrepostRunTest(rfm.RunOnlyRegressionTest): postrun_cmds = ['echo FINISHED'] executable = './random_numbers.sh' - @run_before('sanity') - def set_sanity_patterns(self): + @sanity_function + def validate_test(self): numbers = sn.extractall( r'Random: (?P\S+)', self.stdout, 'number', float ) - self.sanity_patterns = sn.all([ + return sn.all([ sn.assert_eq(sn.count(numbers), 100), sn.all(sn.map(lambda x: sn.assert_bounded(x, 90, 100), numbers)), sn.assert_found(r'FINISHED', self.stdout) diff --git a/tutorials/advanced/random/randint.py b/tutorials/advanced/random/randint.py index 21f97a889d..642dcf10d3 100644 --- a/tutorials/advanced/random/randint.py +++ b/tutorials/advanced/random/randint.py @@ -14,12 +14,12 @@ class DeferredIterationTest(rfm.RunOnlyRegressionTest): valid_prog_environs = ['*'] executable = './random_numbers.sh' - @run_before('sanity') - def set_sanity_patterns(self): + @sanity_function + def validate_test(self): numbers = sn.extractall( r'Random: (?P\S+)', self.stdout, 'number', float ) - self.sanity_patterns = sn.all([ + return sn.all([ sn.assert_eq(sn.count(numbers), 100), sn.all(sn.map(lambda x: sn.assert_bounded(x, 90, 100), numbers)) ]) diff --git a/tutorials/advanced/runonly/echorand.py b/tutorials/advanced/runonly/echorand.py index 2c2e929a20..0b99c3596e 100644 --- a/tutorials/advanced/runonly/echorand.py +++ b/tutorials/advanced/runonly/echorand.py @@ -20,9 +20,9 @@ class EchoRandTest(rfm.RunOnlyRegressionTest): f'$((RANDOM%({upper}+1-{lower})+{lower}))' ] - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_bounded( + @sanity_function + def assert_solution(self): + return sn.assert_bounded( sn.extractsingle( r'Random: (?P\S+)', self.stdout, 'number', float ), From d22ad5446bac755fdcc88a1b447876d595462764 Mon Sep 17 00:00:00 2001 From: Javier Otero <71280927+jjotero@users.noreply.github.com> Date: Mon, 30 
Aug 2021 16:52:37 +0200 Subject: [PATCH 03/11] Update dependencies tutorials --- docs/tutorial_deps.rst | 14 ++++----- tutorials/deps/osu_benchmarks.py | 53 ++++++++++++-------------------- 2 files changed, 26 insertions(+), 41 deletions(-) diff --git a/docs/tutorial_deps.rst b/docs/tutorial_deps.rst index 033543a68a..dbbe18b1e3 100644 --- a/docs/tutorial_deps.rst +++ b/docs/tutorial_deps.rst @@ -19,18 +19,18 @@ We first create a basic run-only test, that fetches the benchmarks: .. literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 130- + :lines: 115- This test doesn't need any specific programming environment, so we simply pick the ``builtin`` environment in the ``login`` partition. The build tests would then copy the benchmark code and build it for the different programming environments: .. literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 103-128 + :lines: 88-112 The only new thing that comes in with the :class:`OSUBuildTest` test is the following: .. literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 110-112 + :lines: 95-97 Here we tell ReFrame that this test depends on a test named :class:`OSUDownloadTest`. This test may or may not be defined in the same test file; all ReFrame needs is the test name. @@ -46,7 +46,7 @@ The next step for the :class:`OSUBuildTest` is to set its :attr:`sourcesdir` to This is achieved with the following specially decorated function: .. literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 114-119 + :lines: 99-104 The :func:`@require_deps ` decorator binds each argument of the decorated function to the corresponding target dependency. In order for the binding to work correctly the function arguments must be named after the target dependencies. @@ -62,7 +62,7 @@ For the next test we need to use the OSU benchmark binaries that we just built, Here is the relevant part: .. literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 13-50 + :lines: 13-45 First, since we will have multiple similar benchmarks, we move all the common functionality to the :class:`OSUBenchmarkTestBase` base class. Again nothing new here; we are going to use two nodes for the benchmark and we set :attr:`sourcesdir ` to ``None``, since none of the benchmark tests will use any additional resources. @@ -79,7 +79,7 @@ The next step for the :class:`OSULatencyTest` is to set its executable to point This is achieved with the following specially decorated function: .. literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 38-44 + :lines: 35-41 This concludes the presentation of the :class:`OSULatencyTest` test. The :class:`OSUBandwidthTest` is completely analogous. @@ -87,7 +87,7 @@ The :class:`OSUAllreduceTest` shown below is similar to the other two, except th It is essentially a scalability test that is running the ``osu_allreduce`` executable created by the :class:`OSUBuildTest` for 2, 4, 8 and 16 nodes. .. 
literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 76-100 + :lines: 66-85 The full set of OSU example tests is shown below: diff --git a/tutorials/deps/osu_benchmarks.py b/tutorials/deps/osu_benchmarks.py index b30638514a..256e6881b3 100644 --- a/tutorials/deps/osu_benchmarks.py +++ b/tutorials/deps/osu_benchmarks.py @@ -23,17 +23,14 @@ class OSUBenchmarkTestBase(rfm.RunOnlyRegressionTest): def set_dependencies(self): self.depends_on('OSUBuildTest', udeps.by_env) - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_found(r'^8', self.stdout) + @sanity_function + def validate_test(self): + return sn.assert_found(r'^8', self.stdout) @rfm.simple_test class OSULatencyTest(OSUBenchmarkTestBase): descr = 'OSU latency test' - reference = { - '*': {'latency': (0, None, None, 'us')} - } @require_deps def set_executable(self, OSUBuildTest): @@ -43,19 +40,14 @@ def set_executable(self, OSUBuildTest): ) self.executable_opts = ['-x', '100', '-i', '1000'] - @run_before('performance') - def set_perf_patterns(self): - self.perf_patterns = { - 'latency': sn.extractsingle(r'^8\s+(\S+)', self.stdout, 1, float) - } + @performance_function('us') + def latency(self): + return sn.extractsingle(r'^8\s+(\S+)', self.stdout, 1, float) @rfm.simple_test class OSUBandwidthTest(OSUBenchmarkTestBase): descr = 'OSU bandwidth test' - reference = { - '*': {'bandwidth': (0, None, None, 'MB/s')} - } @require_deps def set_executable(self, OSUBuildTest): @@ -65,21 +57,16 @@ def set_executable(self, OSUBuildTest): ) self.executable_opts = ['-x', '100', '-i', '1000'] - @run_before('performance') - def set_perf_patterns(self): - self.perf_patterns = { - 'bandwidth': sn.extractsingle(r'^4194304\s+(\S+)', - self.stdout, 1, float) - } + @performance_function('MB/s') + def bandwidth(self): + return sn.extractsingle(r'^4194304\s+(\S+)', + self.stdout, 1, float) @rfm.simple_test class OSUAllreduceTest(OSUBenchmarkTestBase): mpi_tasks = parameter(1 << i for i in range(1, 5)) descr = 'OSU Allreduce test' - reference = { - '*': {'latency': (0, None, None, 'us')} - } @run_after('init') def set_num_tasks(self): @@ -93,11 +80,9 @@ def set_executable(self, OSUBuildTest): ) self.executable_opts = ['-m', '8', '-x', '1000', '-i', '20000'] - @run_before('performance') - def set_perf_patterns(self): - self.perf_patterns = { - 'latency': sn.extractsingle(r'^8\s+(\S+)', self.stdout, 1, float) - } + @performance_function('us') + def latency(self): + return sn.extractsingle(r'^8\s+(\S+)', self.stdout, 1, float) @rfm.simple_test @@ -122,9 +107,9 @@ def set_sourcedir(self, OSUDownloadTest): def set_build_system_attrs(self): self.build_system.max_concurrency = 8 - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_not_found('error', self.stderr) + @sanity_function + def validate_build(self): + return sn.assert_not_found('error', self.stderr) @rfm.simple_test @@ -140,6 +125,6 @@ class OSUDownloadTest(rfm.RunOnlyRegressionTest): 'tar xzf osu-micro-benchmarks-5.6.2.tar.gz' ] - @run_before('sanity') - def set_sanity_patterns(self): - self.sanity_patterns = sn.assert_not_found('error', self.stderr) + @sanity_function + def validate_download(self): + return sn.assert_not_found('error', self.stderr) From 3193c52f1df132cb6d0f90082acdeb50956bda46 Mon Sep 17 00:00:00 2001 From: Javier Otero <71280927+jjotero@users.noreply.github.com> Date: Mon, 30 Aug 2021 17:24:35 +0200 Subject: [PATCH 04/11] Update tips and tricks --- docs/tutorial_tips_tricks.rst | 16 ++++++++-------- 1 file 
changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/tutorial_tips_tricks.rst b/docs/tutorial_tips_tricks.rst
index 3192d6d9ef..9132b1c527 100644
--- a/docs/tutorial_tips_tricks.rst
+++ b/docs/tutorial_tips_tricks.rst
@@ -90,7 +90,7 @@ As suggested by the warning message, passing :option:`-v` will give you the stac
Debugging deferred expressions
==============================

-Although deferred expression that are used in :attr:`sanity_patterns` and :attr:`perf_patterns` behave similarly to normal Python expressions, you need to understand their `implicit evaluation rules `__.
+Although deferred expressions that are used in sanity and performance functions behave similarly to normal Python expressions, you need to understand their `implicit evaluation rules `__.
One of the rules is that :func:`str` triggers the implicit evaluation, so if you try to use the standard :func:`print` function with a deferred expression, you might get unexpected results if that expression has not yet been evaluated.
For this reason, ReFrame offers a sanity function counterpart of :func:`print`, which allows you to safely print deferred expressions.

@@ -116,9 +116,9 @@ Trying to use the standard print here :func:`print` function here would be of li

     def set_sourcepath(self):
         self.sourcepath = f'hello.{self.lang}'

-    @run_before('sanity')
-    def set_sanity_patterns(self):
-        self.sanity_patterns = sn.assert_found(r'Hello, World\!', sn.print(self.stdout))
+    @sanity_function
+    def validate_output(self):
+        return sn.assert_found(r'Hello, World\!', sn.print(self.stdout))

If we run the test, we can see that the correct standard output filename will be printed after sanity:

@@ -146,7 +146,7 @@ If we run the test, we can see that the correct standard output filename will be
Debugging sanity and performance patterns
=========================================

-When creating a new test that requires a complex output parsing for either the sanity or performance stages, setting the :attr:`sanity_patterns` and :attr:`perf_patterns` may involve some trial and error to debug the complex regular expressions required.
+When creating a new test that requires a complex output parsing for either the ``sanity`` or ``performance`` pipeline stages, tunning the functions decorated by :attr:`@sanity_function` or :attr:`@performance_function` may involve some trial and error to debug the complex regular expressions required.
For lightweight tests which execute in a few seconds, this trial and error may not be an issue at all.
However, when dealing with tests which take longer to run, this method can quickly become tedious and inefficient.

When dealing with ``make``-based projects which take a long time to compile, you can use the command line option :option:`--dont-restage` in order to speed up the compile stage in subsequent runs.

When a test fails, ReFrame will keep the test output in the stage directory after its execution, which means that one can load this output into a Python shell or another helper script without having to rerun the expensive test again.
-If the test is not failing but the user still wants to experiment or modify the existing :attr:`~reframe.core.pipeline.RegressionTest.sanity_patterns` or :attr:`~reframe.core.pipeline.RegressionTest.perf_patterns`, the command line option :option:`--keep-stage-files` can be used when running ReFrame to avoid deleting the stage directory.
+If the test is not failing but the user still wants to experiment or modify the existing sanity or performance functions, the command line option :option:`--keep-stage-files` can be used when running ReFrame to avoid deleting the stage directory.
With the executable's output available in the stage directory, one can simply use the `re `_ module to debug regular expressions as shown below.

.. code-block:: python

   >>> # Evaluate the regular expression
   >>> re.findall(the_regex_pattern, test_output)

-Alternatively to using the `re `_ module, one could use all the sanity functions provided by ReFrame directly from the Python shell.
+Alternatively to using the `re `_ module, one could use the utility functions of the :mod:`~reframe.utility.sanity` module provided by ReFrame directly from the Python shell.
In order to do so, if ReFrame was installed manually using the ``bootstrap.sh`` script, one will have to make all the Python modules from the ``external`` directory accessible to the Python shell as shown below.

.. code-block:: python

@@ -190,7 +190,7 @@ Debugging test loading
======================

If you are new to ReFrame, you might sometimes wonder why your tests are not loading or why your tests are not running on the partition they were supposed to run on.
-This can be due to ReFrame picking the wrong configuration entry or that your test is not written properly (not decorated, no :attr:`valid_systems` etc.).
+This can be due to ReFrame picking the wrong configuration entry or to your test not being written properly (not decorated, no :attr:`~reframe.core.pipeline.RegressionTest.valid_systems`, etc.).
If you try to load a test file and list its tests by increasing the verbosity level twice, you will get enough output to help you debug such issues.
Let's try loading the ``tutorials/basics/hello/hello2.py`` file:

From dc1748930af19927658df04b7c7c5a1a0fff63b9 Mon Sep 17 00:00:00 2001
From: Javier Otero <71280927+jjotero@users.noreply.github.com>
Date: Mon, 30 Aug 2021 17:48:03 +0200
Subject: [PATCH 05/11] Remove other references to perf_patterns and
 sanity_patterns

---
 docs/configure.rst        | 2 +-
 reframe/utility/sanity.py | 8 +++++---
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/docs/configure.rst b/docs/configure.rst
index b33f0c0e5d..f568aa8be4 100644
--- a/docs/configure.rst
+++ b/docs/configure.rst
@@ -194,7 +194,7 @@ You can view logger's log level as a general cut off.
For example, if we have set it to ``warning``, no debug or informational messages would ever be printed.

Finally, there is a special set of handlers for handling performance log messages.
-Performance log messages are generated *only* for `performance tests `__, i.e., tests defining the :attr:`perf_patterns ` attribute.
+Performance log messages are generated *only* for `performance tests `__, i.e., tests defining the :attr:`perf_variables ` attribute.
The performance log handlers are stored in the ``handlers_perflog`` property.
The ``filelog`` handler used in this example will create a file per test and per system/partition combination (``.///.log``) and will append to it the obtained performance data every time a performance test is run.
Notice how the message to be logged is structured in the ``format`` property, such that it can be easily parsed from post processing tools.
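To tie this to a concrete configuration snippet, such a ``filelog`` performance handler could be declared roughly as follows. This is a minimal sketch rather than the exact handler of this example; the ``format`` string in particular is illustrative:

.. code-block:: python

   'logging': [
       {
           # Handlers under 'handlers_perflog' receive only the log records
           # emitted for performance tests.
           'handlers_perflog': [
               {
                   'type': 'filelog',
                   # One file per test and per system/partition combination
                   'prefix': '%(check_system)s/%(check_partition)s',
                   'level': 'info',
                   # Structured record that is easy to parse afterwards
                   'format': ('%(check_job_completion_time)s|%(check_name)s|'
                              '%(check_perf_var)s=%(check_perf_value)s|'
                              '%(check_perf_unit)s'),
                   'append': True
               }
           ]
       }
   ]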
diff --git a/reframe/utility/sanity.py b/reframe/utility/sanity.py
index 8cfd4607c1..f9f715728b 100644
--- a/reframe/utility/sanity.py
+++ b/reframe/utility/sanity.py
@@ -164,9 +164,11 @@ def print(obj, *, sep=' ', end='\n', file=None, flush=False):

    .. code:: python

-      self.sanity_patterns = sn.assert_eq(
-          sn.count(sn.print(sn.extract_all(...))), 10
-      )
+      @sanity_function
+      def my_sanity_fn(self):
+          return sn.assert_eq(
+              sn.count(sn.print(sn.extractall(...))), 10
+          )

    If ``file`` is None, :func:`print` will print its arguments to the
    standard output. Unlike the builtin :func:`print() `

From 15f833df57114601d80ae7c3d7635c6d3e1bdcf1 Mon Sep 17 00:00:00 2001
From: Javier Otero <71280927+jjotero@users.noreply.github.com>
Date: Mon, 30 Aug 2021 19:11:39 +0200
Subject: [PATCH 06/11] Bugfix hello OMP

---
 tutorials/basics/hellomp/hellomp2.py | 2 +-
 tutorials/basics/hellomp/hellomp3.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/tutorials/basics/hellomp/hellomp2.py b/tutorials/basics/hellomp/hellomp2.py
index 6dcbbe7386..d9ea0a6897 100644
--- a/tutorials/basics/hellomp/hellomp2.py
+++ b/tutorials/basics/hellomp/hellomp2.py
@@ -25,5 +25,5 @@ def set_compilation_flags(self):
     @sanity_function
     def assert_num_messages(self):
         num_messages = len(sn.findall(r'\[\s?\d+\] Hello, World\!',
-                                      self.stdout))
+                                      self.stdout).evaluate())
         return num_messages == 16

diff --git a/tutorials/basics/hellomp/hellomp3.py b/tutorials/basics/hellomp/hellomp3.py
index 956f1906fd..a2d7ffcc17 100644
--- a/tutorials/basics/hellomp/hellomp3.py
+++ b/tutorials/basics/hellomp/hellomp3.py
@@ -26,5 +26,5 @@ def set_compilation_flags(self):
     @sanity_function
     def assert_num_messages(self):
         num_messages = len(sn.findall(r'\[\s?\d+\] Hello, World\!',
-                                      self.stdout))
+                                      self.stdout).evaluate())
         return num_messages == 16

From da60575cfbd788769cf4448bf5e2bea2ea3fdf37 Mon Sep 17 00:00:00 2001
From: Javier Otero <71280927+jjotero@users.noreply.github.com>
Date: Wed, 1 Sep 2021 12:20:10 +0200
Subject: [PATCH 07/11] Address PR comments

---
 docs/tutorial_advanced.rst    | 6 +++---
 docs/tutorial_basics.rst      | 6 +++---
 docs/tutorial_tips_tricks.rst | 2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/docs/tutorial_advanced.rst b/docs/tutorial_advanced.rst
index e9477527fc..6db9dfc032 100644
--- a/docs/tutorial_advanced.rst
+++ b/docs/tutorial_advanced.rst
@@ -249,7 +249,7 @@ Writing a Run-Only Regression Test
----------------------------------

There are cases when it is desirable to perform regression testing for an already built executable.
-In the following test we use simply the ``echo`` Bash shell command to print a random integer between specific lower and upper bounds:
+In the following test we simply use the ``echo`` Bash shell command to print a random integer between specific lower and upper bounds. Here is the full regression test:

.. code-block:: console

@@ -492,7 +492,7 @@ Let's run the test and inspect the generated job script:
The job options specified inside a ReFrame test are always the last to be emitted in the job script preamble and do not affect the options that are passed implicitly through other test attributes or configuration options.

There is a small problem with this test though.
-What if we change the job scheduler in that partition or what if we want to port the test to a different system that does not use Slurm and and another option is needed to achieve the same result.
+What if we change the job scheduler in that partition, or what if we want to port the test to a different system that does not use Slurm, where another option is needed to achieve the same result?
 The obvious answer is to adapt the test, but is there a more portable way?
 The answer is yes and this can be achieved through so-called *extra resources*.
 ReFrame gives you the possibility to associate scheduler options with a "resource" managed by the partition scheduler.
@@ -817,7 +817,7 @@ In fact, writing the test in this way permits having hooks that depend on undefi
 This is the case with the :func:`set_container_platform` hook, which depends on the undefined parameter ``platform``.
 Hence, the derived test **must** define all the required parameters and variables; otherwise ReFrame will notice that the test is not well defined and will raise an error accordingly.
 
-Before moving onwards to the derived test, note that the :class:`ContainerBase` class takes the additional argument ``pin_prefix=True``, which locks the prefix of all derived tests to this base test.
+Before moving ahead with the derived test, note that the :class:`ContainerBase` class takes the additional argument ``pin_prefix=True``, which locks the prefix of all derived tests to this base test.
 This will allow the retrieval of the sources located in the library by any derived test, regardless of what their containing directory is.
 
 .. code-block:: console

diff --git a/docs/tutorial_basics.rst b/docs/tutorial_basics.rst
index b5244a4010..7bbacc448e 100644
--- a/docs/tutorial_basics.rst
+++ b/docs/tutorial_basics.rst
@@ -352,13 +352,13 @@ Here is how the new configuration file looks like with the needed additions high
 
 Here we define a system named ``catalina`` that has one partition named ``default``.
 This partition makes no use of any `workload manager `__, but instead launches any jobs locally as OS processes.
-Two programming environments are relevant for that partition, namely ``gnu`` and ``clang``, which are defined in the section :js:attr:`environments` of the configuration file.
+Two programming environments are relevant for that partition, namely ``gnu`` and ``clang``, which are defined in the section :js:attr:`environments` of the `configuration file `__.
 The ``gnu`` programming environment provides GCC 9, whereas the ``clang`` one provides the Clang compiler from the system.
 Notice how you can define the actual commands for invoking the C, C++ and Fortran compilers in each programming environment.
 As soon as a programming environment defines the different compilers, ReFrame will automatically pick the right compiler based on the source file extension.
 In addition to C, C++ and Fortran programs, ReFrame will recognize the ``.cu`` extension as well and will try to invoke the ``nvcc`` compiler for CUDA programs.
-Finally, the new system that we defined may be identified by the hostname ``tresa`` (see the :js:attr:`hostnames` configuration parameter) and it will not use any environment modules system (see the :js:attr:`modules_system` configuration parameter).
+Finally, the new system that we defined may be identified by the hostname ``tresa`` (see the :js:attr:`hostnames` `configuration parameter `__) and it will not use any environment modules system (see the :js:attr:`modules_system` `configuration parameter `__).
 The :js:attr:`hostnames` attribute will help ReFrame to automatically pick the right configuration when running on it.
 Notice how the ``generic`` system matches any hostname, so that it acts as a fallback system.
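As a rough sketch of the pieces discussed above, a configuration entry for such a system and its two programming environments could look as follows. The keys shown follow ReFrame's configuration schema, but the ``descr`` text and the exact compiler commands are illustrative assumptions rather than the verbatim contents of the tutorial's configuration file:

.. code-block:: python

   site_configuration = {
       'systems': [
           {
               'name': 'catalina',
               'descr': 'Example laptop system',  # illustrative description
               'hostnames': ['tresa'],            # matched against the actual hostname
               'modules_system': 'nomod',         # no environment modules system
               'partitions': [
                   {
                       'name': 'default',
                       'scheduler': 'local',      # run jobs as local OS processes
                       'launcher': 'local',
                       'environs': ['gnu', 'clang']
                   }
               ]
           }
       ],
       'environments': [
           # The right compiler command is picked based on the source
           # file extension of the test
           {'name': 'gnu', 'cc': 'gcc-9', 'cxx': 'g++-9', 'ftn': 'gfortran-9'},
           {'name': 'clang', 'cc': 'clang', 'cxx': 'clang++', 'ftn': ''}
       ]
   }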
@@ -468,7 +468,7 @@ In this case, we set the :func:`set_compile_flags` hook to run before the ReFram
 
 In this example, the generated executable takes a single argument which sets the number of threads to be used.
-The options passed to the test's executable can be set throught the :attr:`executable_opts ` variable, which in this case is set to ``'16'``.
+The options passed to the test's executable can be set through the :attr:`executable_opts ` variable, which in this case is set to ``'16'``.
 
 Let's run the test now:

diff --git a/docs/tutorial_tips_tricks.rst b/docs/tutorial_tips_tricks.rst
index 9132b1c527..8171545661 100644
--- a/docs/tutorial_tips_tricks.rst
+++ b/docs/tutorial_tips_tricks.rst
@@ -146,7 +146,7 @@ If we run the test, we can see that the correct standard output filename will be
 
 Debugging sanity and performance patterns
 =========================================
 
-When creating a new test that requires a complex output parsing for either the ``sanity`` or ``performance`` pipeline stages, tunning the functions decorated by :attr:`@sanity_function` or :attr:`@performance_function` may involve some trial and error to debug the complex regular expressions required.
+When creating a new test that requires complex output parsing for either the ``sanity`` or ``performance`` pipeline stages, tuning the functions decorated by :attr:`@sanity_function` or :attr:`@performance_function` may involve some trial and error to debug the complex regular expressions required.
 For lightweight tests which execute in a few seconds, this trial and error may not be an issue at all.
 However, when dealing with tests which take longer to run, this method can quickly become tedious and inefficient.

From d7c8d147caed893194b7b6b31cc48f8cfb459d86 Mon Sep 17 00:00:00 2001
From: Javier Otero <71280927+jjotero@users.noreply.github.com>
Date: Wed, 1 Sep 2021 12:23:46 +0200
Subject: [PATCH 08/11] Fix typo in the docs

---
 docs/tutorial_basics.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/tutorial_basics.rst b/docs/tutorial_basics.rst
index 7bbacc448e..0dd95a6499 100644
--- a/docs/tutorial_basics.rst
+++ b/docs/tutorial_basics.rst
@@ -809,7 +809,7 @@ Examining the performance logs
 
 ReFrame has a powerful mechanism for logging its activities as well as performance data.
 It supports different types of log channels and it can send data to any number of them simultaneously.
-For example, performance data might be logged in files and the same time being sent to Syslog or to a centralized log management server.
+For example, performance data might be logged in files and at the same time be sent to Syslog or to a centralized log management server.
 By default (i.e., starting off from the builtin configuration file), ReFrame sends performance data to files per test under the ``perflogs/`` directory:
 
 .. 
code-block:: none From 2c794d1d2996101184c6607e4a1d02aabf6004df Mon Sep 17 00:00:00 2001 From: Javier Otero <71280927+jjotero@users.noreply.github.com> Date: Thu, 9 Sep 2021 15:32:15 +0200 Subject: [PATCH 09/11] Address comments --- docs/configure.rst | 2 +- docs/tutorial_advanced.rst | 8 +-- docs/tutorial_basics.rst | 90 +++++++++++++++++----------- docs/tutorial_tips_tricks.rst | 2 +- tutorials/basics/hellomp/hellomp2.py | 6 +- tutorials/basics/hellomp/hellomp3.py | 6 +- tutorials/basics/stream/stream2.py | 12 +--- tutorials/basics/stream/stream3.py | 38 +++--------- 8 files changed, 77 insertions(+), 87 deletions(-) diff --git a/docs/configure.rst b/docs/configure.rst index f568aa8be4..83a40b081b 100644 --- a/docs/configure.rst +++ b/docs/configure.rst @@ -194,7 +194,7 @@ You can view logger's log level as a general cut off. For example, if we have set it to ``warning``, no debug or informational messages would ever be printed. Finally, there is a special set of handlers for handling performance log messages. -Performance log messages are generated *only* for `performance tests `__, i.e., tests defining the :attr:`perf_variables ` attribute. +Performance log messages are generated *only* for `performance tests `__, i.e., tests defining the :attr:`~reframe.core.pipeline.RegressionTest.perf_variables` or the :attr:`~reframe.core.pipeline.RegressionTest.perf_patterns` attributes. The performance log handlers are stored in the ``handlers_perflog`` property. The ``filelog`` handler used in this example will create a file per test and per system/partition combination (``.///.log``) and will append to it the obtained performance data every time a performance test is run. Notice how the message to be logged is structured in the ``format`` property, such that it can be easily parsed from post processing tools. diff --git a/docs/tutorial_advanced.rst b/docs/tutorial_advanced.rst index 6db9dfc032..2628e0204b 100644 --- a/docs/tutorial_advanced.rst +++ b/docs/tutorial_advanced.rst @@ -161,7 +161,7 @@ Let's inspect the build script generated by ReFrame: trap _onerror ERR - make -j 1 CPPFLAGS="-DELEM_TYPE=float" + make -j 1 CPPFLAGS="-DELEM_TYPE=float" CC=cc CXX=CC The compiler variables (``CC``, ``CXX`` etc.) are set based on the corresponding values specified in the `configuration `__ of the current environment. @@ -175,7 +175,7 @@ In this case, ``make`` will be invoked as follows: .. code:: - make -j 1 CPPFLAGS="-DELEM_TYPE=float" + make -j 1 CPPFLAGS="-DELEM_TYPE=float" CC=cc CXX=CC Notice that the ``-j 1`` option is always generated. We can increase the build concurrency by setting the :attr:`~reframe.core.buildsystems.Make.max_concurrency` attribute. @@ -681,8 +681,8 @@ The test will verify that all the nodes print the expected host name: :emphasize-lines: 10- The first thing to notice in this test is that :attr:`~reframe.core.pipeline.RegressionTest.num_tasks` is set to zero as default, which is a requirement for flexible tests. -However, this value is set to the actual number of tasks during the ``run`` pipeline stage. -Lastly, the sanity check of this test counts the host names printed and verifies that the total count equals :attr:`~reframe.core.pipeline.RegressionTest.num_tasks`. +However, with flexible tests, this value is updated right after the job completes to the actual number of tasks that were used. +Consequenly, this allows the sanity function of the tests to assert that the number host names printed matches :attr:`~reframe.core.pipeline.RegressionTest.num_tasks`. .. 
|--flex-alloc-nodes| replace:: :attr:`--flex-alloc-nodes`
.. _--flex-alloc-nodes: manpage.html#cmdoption-flex-alloc-nodes

diff --git a/docs/tutorial_basics.rst b/docs/tutorial_basics.rst
index 0dd95a6499..d32b77b5af 100644
--- a/docs/tutorial_basics.rst
+++ b/docs/tutorial_basics.rst
@@ -664,11 +664,13 @@ For running the benchmark, we need to set the OpenMP number of threads and pin t
 You can set environment variables in a ReFrame test through the :attr:`~reframe.core.pipeline.RegressionTest.variables` dictionary.
 
 What makes a ReFrame test a performance test is the definition of at least one :ref:`performance function`.
-Similarly to a test's :func:`@sanity_function`, a performance function is simply a member function decorated with the :attr:`@performance_function` decorator, which is responsible for extracting a specified performance quantity from a regression test.
-The :attr:`@performance_function` decorator must be passed the units of the quantity to be extracted, and it also takes the optional argument ``perf_key`` to customize the name of the extracted performance variable.
-If ``perf_key`` is not provided, the performance variable will take the name of the decorated performance function.
+Similarly to a test's :func:`@sanity_function`, a performance function is a member function decorated with the :attr:`@performance_function` decorator, which binds the decorated function to a given unit.
+These functions can be used by the regression test to extract, measure or compute a given quantity of interest; in this context, the values returned by a performance function are referred to as performance variables.
+Alternatively, performance functions can also be thought of as `tools` available to the regression test for extracting performance variables.
+By default, ReFrame will attempt to execute all the available performance functions during the test's ``performance`` stage, producing a single performance variable out of each of them.
+These default-generated performance variables are defined in the regression test's attribute :attr:`~reframe.core.pipeline.RegressionTest.perf_variables` during class instantiation, and their default name matches the name of their associated performance function.
+However, one could customize the name of a default-generated performance variable by passing the ``perf_key`` argument to the :attr:`@performance_function` decorator of the associated performance function.
 
-ReFrame identifies all member functions that use the :attr:`@performance_function` decorator, and will automatically schedule them for execution during the ``performance`` pipeline stage of the test.
 In this example, we extract four performance variables, namely the memory bandwidth values for each of the "Copy", "Scale", "Add" and "Triad" sub-benchmarks of STREAM, where each of the performance functions uses the :func:`~reframe.utility.sanity.extractsingle` utility function.
 For each of the sub-benchmarks we extract the "Best Rate MB/s" column of the output (see below) and we convert that to a float.
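Spelled out as code, each of these performance functions follows the same basic pattern. The sketch below shows one of them for the "Copy" sub-benchmark; the method name is made up for illustration and the ``perf_key`` argument is included only to demonstrate the renaming discussed above (it is otherwise optional):

.. code-block:: python

   # Member function of the test class; the decorator binds it to the
   # 'MB/s' unit and names the resulting performance variable 'Copy'
   @performance_function('MB/s', perf_key='Copy')
   def extract_copy_bw(self):
       # Extract the 'Best Rate MB/s' column of the 'Copy' line and
       # convert it to a float
       return sn.extractsingle(r'Copy:\s+(\S+)\s+.*', self.stdout, 1, float)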
@@ -731,6 +733,45 @@ The :option:`--performance-report` will generate a short report at the end for e
   Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-gczplnic.log'
 
+---------------------------------------------------
+Setting the test's performance variables explicitly
+---------------------------------------------------
+
+In the above STREAM example, all four performance functions were almost identical except for a small part of the regex pattern, which led to some code repetition.
+Even though the performance functions were rather simple and the code repetition was minimal in that case, this is still not good practice and it is certainly an approach that would not scale to more complex performance functions.
+Hence, in this example, we show how to collapse these four performance functions into a single function and how to reuse that function to create multiple performance variables.
+
+.. code-block:: console
+
+   cat tutorials/basics/stream/stream2.py
+
+.. literalinclude:: ../tutorials/basics/stream/stream2.py
+   :lines: 6-
+   :emphasize-lines: 28-
+
+As shown in the highlighted lines, this example collapses the four performance functions from the previous example into the :func:`extract_bw` function, which is also decorated with the :attr:`@performance_function` decorator with the units set to ``'MB/s'``.
+However, the :func:`extract_bw` function now takes the optional argument ``kind``, which selects the STREAM benchmark to extract.
+By default, this argument is set to ``'Copy'`` because functions decorated with :attr:`@performance_function` are only allowed to have ``self`` as a non-default argument.
+Thus, from this performance function definition, ReFrame will default-generate a single performance variable during the test instantiation under the name ``extract_bw``, which will report the performance results from the ``Copy`` benchmark.
+With no further action from our side, ReFrame would just report the performance of the test based on this default-generated performance variable, but that is not what we are after here.
+Therefore, we must modify these default performance variables so that this version of the STREAM test produces the same results as in the previous example.
+As mentioned before, the performance variables (also the default-generated ones) are stored in the :attr:`~reframe.core.pipeline.RegressionTest.perf_variables` dictionary, so all we need to do is redefine this mapping with our desired performance variables, as done in the pre-performance pipeline hook :func:`set_perf_variables`.
+
+.. tip::
+   Performance functions may also be generated inline using the :func:`~reframe.utility.sanity.make_performance_function` utility as shown below.
+
+   .. code-block:: python
+
+      @run_before('performance')
+      def set_perf_vars(self):
+          self.perf_variables = {
+              'Copy': sn.make_performance_function(
+                  sn.extractsingle(r'Copy:\s+(\S+)\s+.*',
+                                   self.stdout, 1, float),
+                  'MB/s'
+              )
+          }
+
 -----------------------
 Adding reference values
 -----------------------
@@ -747,22 +788,23 @@ In the following example, we set the reference values for all the STREAM sub-ben
 
 .. code-block:: console
 
-   cat tutorials/basics/stream/stream2.py
+   cat tutorials/basics/stream/stream3.py
 
-.. literalinclude:: ../tutorials/basics/stream/stream2.py
+.. 
literalinclude:: ../tutorials/basics/stream/stream3.py :lines: 6- :emphasize-lines: 18-25 The performance reference tuple consists of the reference value, the lower and upper thresholds expressed as fractional numbers relative to the reference value, and the unit of measurement. If any of the thresholds is not relevant, :class:`None` may be used instead. +Also, the units in this :attr:`~reframe.core.pipeline.RegressionTest.reference` variable are entirely optional, since they were already provided through the :attr:`@performance_function` decorator. If any obtained performance value is beyond its respective thresholds, the test will fail with a summary as shown below: .. code-block:: console - ./bin/reframe -c tutorials/basics/stream/stream2.py -r --performance-report + ./bin/reframe -c tutorials/basics/stream/stream3.py -r --performance-report .. code-block:: none @@ -779,30 +821,6 @@ If any obtained performance value is beyond its respective thresholds, the test * Rerun with '-n StreamWithRefTest -p gnu --system catalina:default' * Reason: performance error: failed to meet reference: Copy=24586.5, expected 55200 (l=52440.0, u=57960.0) -Also, note how the performance syntax for this example is far more compact in comparison to our first iteration of the STREAM test. -In that first STREAM example, all four performance functions were almost identical, except for a small part of the regex pattern, which led to a lot of code repetition. -Hence, this example collapses all four performance functions into a single performance function, which now takes an optional argument to select the quantity to extract. -Then, the performance variables of the test can be defined by setting the respective entries in the :attr:`~reframe.core.pipeline.RegressionTest.perf_variables` dictionary. - -.. literalinclude:: ../tutorials/basics/stream/stream2.py - :lines: 41- - -.. note:: - Performance functions may also be generated inline using the :func:`~reframe.utility.sanity.make_performance_function` utility as shown below. - - .. code-block:: python - - @run_before('performance') - def set_perf_vars(self): - self.perf_variables = { - 'Copy': sn.make_performance_function( - sn.extractsingle(r'Copy:\s+(\S+)\s+.*', - self.stdout, 1, float), - 'MB/s' - ) - } - - ------------------------------ Examining the performance logs ------------------------------ @@ -1122,9 +1140,9 @@ Let's see and comment the changes: .. code-block:: console - cat tutorials/basics/stream/stream3.py + cat tutorials/basics/stream/stream4.py -.. literalinclude:: ../tutorials/basics/stream/stream3.py +.. literalinclude:: ../tutorials/basics/stream/stream4.py :lines: 6- :emphasize-lines: 8, 27-41, 46-56 @@ -1155,18 +1173,18 @@ Let's run our adapted test now: .. code-block:: console - ./bin/reframe -c tutorials/basics/stream/stream3.py -r --performance-report + ./bin/reframe -c tutorials/basics/stream/stream4.py -r --performance-report .. 
code-block:: none
 
     [ReFrame Setup]
       version:           3.3-dev0 (rev: cb974c13)
-      command:           './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/stream/stream3.py -r --performance-report'
+      command:           './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/stream/stream4.py -r --performance-report'
       launched by:       user@dom101
       working directory: '/users/user/Devel/reframe'
       settings file:     'tutorials/config/settings.py'
-      check search path: '/users/user/Devel/reframe/tutorials/basics/stream/stream3.py'
+      check search path: '/users/user/Devel/reframe/tutorials/basics/stream/stream4.py'
       stage directory:   '/users/user/Devel/reframe/stage'
       output directory:  '/users/user/Devel/reframe/output'

diff --git a/docs/tutorial_tips_tricks.rst b/docs/tutorial_tips_tricks.rst
index 8171545661..6f63faf191 100644
--- a/docs/tutorial_tips_tricks.rst
+++ b/docs/tutorial_tips_tricks.rst
@@ -90,7 +90,7 @@ As suggested by the warning message, passing :option:`-v` will give you the stac
 Debugging deferred expressions
 ==============================
 
-Although deferred expression that are used in sanity and performance functions behave similarly to normal Python expressions, you need to understand their `implicit evaluation rules `__.
+Although deferred expressions that are used in sanity and performance functions behave similarly to normal Python expressions, you need to understand their `implicit evaluation rules `__.
 One of the rules is that :func:`str` triggers the implicit evaluation, so if you try to use the standard :func:`print` function with a deferred expression, you might get unexpected results if that expression has not been evaluated yet.
 For this reason, ReFrame offers a sanity function counterpart of :func:`print`, which allows you to safely print deferred expressions.
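For instance, combining this with the multithreaded "Hello, World!" sanity check shown earlier, one could wrap part of the deferred expression in :func:`~reframe.utility.sanity.print` to inspect the matches at the moment the expression is finally evaluated. This is only a debugging sketch, not part of the tutorial sources:

.. code-block:: python

   import reframe.utility.sanity as sn

   @sanity_function
   def assert_num_messages(self):
       # sn.print() prints its argument upon evaluation and returns it
       # unchanged, so it can wrap any part of a deferred expression
       num_messages = sn.len(sn.print(
           sn.findall(r'\[\s?\d+\] Hello, World\!', self.stdout)
       ))
       return sn.assert_eq(num_messages, 16)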
diff --git a/tutorials/basics/hellomp/hellomp2.py b/tutorials/basics/hellomp/hellomp2.py index d9ea0a6897..7182ae9ac0 100644 --- a/tutorials/basics/hellomp/hellomp2.py +++ b/tutorials/basics/hellomp/hellomp2.py @@ -24,6 +24,6 @@ def set_compilation_flags(self): @sanity_function def assert_num_messages(self): - num_messages = len(sn.findall(r'\[\s?\d+\] Hello, World\!', - self.stdout).evaluate()) - return num_messages == 16 + num_messages = sn.len(sn.findall(r'\[\s?\d+\] Hello, World\!', + self.stdout)) + return sn.assert_eq(num_messages, 16) diff --git a/tutorials/basics/hellomp/hellomp3.py b/tutorials/basics/hellomp/hellomp3.py index a2d7ffcc17..8753bb3894 100644 --- a/tutorials/basics/hellomp/hellomp3.py +++ b/tutorials/basics/hellomp/hellomp3.py @@ -25,6 +25,6 @@ def set_compilation_flags(self): @sanity_function def assert_num_messages(self): - num_messages = len(sn.findall(r'\[\s?\d+\] Hello, World\!', - self.stdout).evaluate()) - return num_messages == 16 + num_messages = sn.len(sn.findall(r'\[\s?\d+\] Hello, World\!', + self.stdout)) + return sn.assert_eq(num_messages, 16) diff --git a/tutorials/basics/stream/stream2.py b/tutorials/basics/stream/stream2.py index 52aa740bd2..0120736e34 100644 --- a/tutorials/basics/stream/stream2.py +++ b/tutorials/basics/stream/stream2.py @@ -20,14 +20,6 @@ class StreamWithRefTest(rfm.RegressionTest): 'OMP_NUM_THREADS': '4', 'OMP_PLACES': 'cores' } - reference = { - 'catalina': { - 'Copy': (25200, -0.05, 0.05, 'MB/s'), - 'Scale': (16800, -0.05, 0.05, 'MB/s'), - 'Add': (18500, -0.05, 0.05, 'MB/s'), - 'Triad': (18800, -0.05, 0.05, 'MB/s') - } - } @run_before('compile') def set_compiler_flags(self): @@ -41,7 +33,8 @@ def validate_solution(self): @performance_function('MB/s') def extract_bw(self, kind='Copy'): '''Generic performance extraction function.''' - if kind not in {'Copy', 'Scale', 'Add', 'Triad'}: + + if kind not in ('Copy', 'Scale', 'Add', 'Triad'): raise ValueError(f'illegal value in argument kind ({kind!r})') return sn.extractsingle(rf'{kind}:\s+(\S+)\s+.*', @@ -50,6 +43,7 @@ def extract_bw(self, kind='Copy'): @run_before('performance') def set_perf_variables(self): '''Build the dictionary with all the performance variables.''' + self.perf_variables = { 'Copy': self.extract_bw(), 'Scale': self.extract_bw('Scale'), diff --git a/tutorials/basics/stream/stream3.py b/tutorials/basics/stream/stream3.py index a1a3cc5b5e..99e022224b 100644 --- a/tutorials/basics/stream/stream3.py +++ b/tutorials/basics/stream/stream3.py @@ -8,9 +8,9 @@ @rfm.simple_test -class StreamMultiSysTest(rfm.RegressionTest): +class StreamWithRefTest(rfm.RegressionTest): valid_systems = ['*'] - valid_prog_environs = ['cray', 'gnu', 'intel', 'pgi'] + valid_prog_environs = ['gnu'] prebuild_cmds = [ 'wget http://www.cs.virginia.edu/stream/FTP/Code/stream.c', ] @@ -29,36 +29,10 @@ class StreamMultiSysTest(rfm.RegressionTest): } } - # Flags per programming environment - flags = variable(dict, value={ - 'cray': ['-fopenmp', '-O3', '-Wall'], - 'gnu': ['-fopenmp', '-O3', '-Wall'], - 'intel': ['-qopenmp', '-O3', '-Wall'], - 'pgi': ['-mp', '-O3'] - }) - - # Number of cores for each system - cores = variable(dict, value={ - 'catalina:default': 4, - 'daint:gpu': 12, - 'daint:mc': 36, - 'daint:login': 10 - }) - @run_before('compile') def set_compiler_flags(self): self.build_system.cppflags = ['-DSTREAM_ARRAY_SIZE=$((1 << 25))'] - environ = self.current_environ.name - self.build_system.cflags = self.flags.get(environ, []) - - @run_before('run') - def set_num_threads(self): - num_threads = 
self.cores.get(self.current_partition.fullname, 1)
-        self.num_cpus_per_task = num_threads
-        self.variables = {
-            'OMP_NUM_THREADS': str(num_threads),
-            'OMP_PLACES': 'cores'
-        }
+        self.build_system.cflags = ['-fopenmp', '-O3', '-Wall']
 
     @sanity_function
     def validate_solution(self):
@@ -66,7 +40,9 @@ def validate_solution(self):
 
     @performance_function('MB/s')
     def extract_bw(self, kind='Copy'):
-        if kind not in {'Copy', 'Scale', 'Add', 'Triad'}:
+        '''Generic performance extraction function.'''
+
+        if kind not in ('Copy', 'Scale', 'Add', 'Triad'):
             raise ValueError(f'illegal value in argument kind ({kind!r})')
 
         return sn.extractsingle(rf'{kind}:\s+(\S+)\s+.*',
@@ -74,6 +50,8 @@ def extract_bw(self, kind='Copy'):
 
     @run_before('performance')
     def set_perf_variables(self):
+        '''Build the dictionary with all the performance variables.'''
+
         self.perf_variables = {
             'Copy': self.extract_bw(),
             'Scale': self.extract_bw('Scale'),

From 5793df10f67a76429a309993da52438da1142c30 Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis 
Date: Mon, 13 Sep 2021 23:22:30 +0200
Subject: [PATCH 10/11] Address PR comments

---
 docs/tutorial_advanced.rst         | 6 +++---
 tutorials/basics/stream/stream2.py | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/tutorial_advanced.rst b/docs/tutorial_advanced.rst
index 2628e0204b..628ce8048d 100644
--- a/docs/tutorial_advanced.rst
+++ b/docs/tutorial_advanced.rst
@@ -161,7 +161,7 @@ Let's inspect the build script generated by ReFrame:
 
     trap _onerror ERR
 
-    make -j 1 CPPFLAGS="-DELEM_TYPE=float" CC=cc CXX=CC
+    make -j 1 CC="cc" CXX="CC" FC="ftn" NVCC="nvcc" CPPFLAGS="-DELEM_TYPE=float"
 
 
 The compiler variables (``CC``, ``CXX`` etc.) are set based on the corresponding values specified in the `configuration `__ of the current environment.
@@ -175,7 +175,7 @@ In this case, ``make`` will be invoked as follows:
 
 .. code::
 
-    make -j 1 CPPFLAGS="-DELEM_TYPE=float" CC=cc CXX=CC
+    make -j 1 CPPFLAGS="-DELEM_TYPE=float"
 
 Notice that the ``-j 1`` option is always generated.
 We can increase the build concurrency by setting the :attr:`~reframe.core.buildsystems.Make.max_concurrency` attribute.
@@ -682,7 +682,7 @@ The test will verify that all the nodes print the expected host name:
 
 The first thing to notice in this test is that :attr:`~reframe.core.pipeline.RegressionTest.num_tasks` is set to zero as default, which is a requirement for flexible tests.
 However, with flexible tests, this value is updated right after the job completes to the actual number of tasks that were used.
-Consequenly, this allows the sanity function of the tests to assert that the number host names printed matches :attr:`~reframe.core.pipeline.RegressionTest.num_tasks`.
+Consequently, this allows the sanity function of the test to assert that the number of host names printed matches :attr:`~reframe.core.pipeline.RegressionTest.num_tasks`.
 
 .. |--flex-alloc-nodes| replace:: :attr:`--flex-alloc-nodes`
 .. 
_--flex-alloc-nodes: manpage.html#cmdoption-flex-alloc-nodes diff --git a/tutorials/basics/stream/stream2.py b/tutorials/basics/stream/stream2.py index 0120736e34..33a32b1550 100644 --- a/tutorials/basics/stream/stream2.py +++ b/tutorials/basics/stream/stream2.py @@ -8,7 +8,7 @@ @rfm.simple_test -class StreamWithRefTest(rfm.RegressionTest): +class StreamAltTest(rfm.RegressionTest): valid_systems = ['*'] valid_prog_environs = ['gnu'] prebuild_cmds = [ From 4eb22aa432de39d41560e3d7c64b8db2f17110f2 Mon Sep 17 00:00:00 2001 From: Javier Otero <71280927+jjotero@users.noreply.github.com> Date: Tue, 14 Sep 2021 22:11:20 +0200 Subject: [PATCH 11/11] Add stream4 file --- tutorials/basics/stream/stream4.py | 82 ++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 tutorials/basics/stream/stream4.py diff --git a/tutorials/basics/stream/stream4.py b/tutorials/basics/stream/stream4.py new file mode 100644 index 0000000000..a1a3cc5b5e --- /dev/null +++ b/tutorials/basics/stream/stream4.py @@ -0,0 +1,82 @@ +# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich) +# ReFrame Project Developers. See the top-level LICENSE file for details. +# +# SPDX-License-Identifier: BSD-3-Clause + +import reframe as rfm +import reframe.utility.sanity as sn + + +@rfm.simple_test +class StreamMultiSysTest(rfm.RegressionTest): + valid_systems = ['*'] + valid_prog_environs = ['cray', 'gnu', 'intel', 'pgi'] + prebuild_cmds = [ + 'wget http://www.cs.virginia.edu/stream/FTP/Code/stream.c', + ] + build_system = 'SingleSource' + sourcepath = 'stream.c' + variables = { + 'OMP_NUM_THREADS': '4', + 'OMP_PLACES': 'cores' + } + reference = { + 'catalina': { + 'Copy': (25200, -0.05, 0.05, 'MB/s'), + 'Scale': (16800, -0.05, 0.05, 'MB/s'), + 'Add': (18500, -0.05, 0.05, 'MB/s'), + 'Triad': (18800, -0.05, 0.05, 'MB/s') + } + } + + # Flags per programming environment + flags = variable(dict, value={ + 'cray': ['-fopenmp', '-O3', '-Wall'], + 'gnu': ['-fopenmp', '-O3', '-Wall'], + 'intel': ['-qopenmp', '-O3', '-Wall'], + 'pgi': ['-mp', '-O3'] + }) + + # Number of cores for each system + cores = variable(dict, value={ + 'catalina:default': 4, + 'daint:gpu': 12, + 'daint:mc': 36, + 'daint:login': 10 + }) + + @run_before('compile') + def set_compiler_flags(self): + self.build_system.cppflags = ['-DSTREAM_ARRAY_SIZE=$((1 << 25))'] + environ = self.current_environ.name + self.build_system.cflags = self.flags.get(environ, []) + + @run_before('run') + def set_num_threads(self): + num_threads = self.cores.get(self.current_partition.fullname, 1) + self.num_cpus_per_task = num_threads + self.variables = { + 'OMP_NUM_THREADS': str(num_threads), + 'OMP_PLACES': 'cores' + } + + @sanity_function + def validate_solution(self): + return sn.assert_found(r'Solution Validates', self.stdout) + + @performance_function('MB/s') + def extract_bw(self, kind='Copy'): + if kind not in {'Copy', 'Scale', 'Add', 'Triad'}: + raise ValueError(f'illegal value in argument kind ({kind!r})') + + return sn.extractsingle(rf'{kind}:\s+(\S+)\s+.*', + self.stdout, 1, float) + + @run_before('performance') + def set_perf_variables(self): + self.perf_variables = { + 'Copy': self.extract_bw(), + 'Scale': self.extract_bw('Scale'), + 'Add': self.extract_bw('Add'), + 'Triad': self.extract_bw('Triad'), + }
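Since ``flags`` and ``cores`` are declared with the :func:`variable` built-in, a derived test can adapt them to a new site without touching any of the hooks of :class:`StreamMultiSysTest`. The following sketch is hypothetical: the class name, partition name and core count are made up for illustration, and it assumes the base class is defined in the same file or otherwise importable:

.. code-block:: python

   # Assuming StreamMultiSysTest is importable or defined in the same file
   @rfm.simple_test
   class StreamMyClusterTest(StreamMultiSysTest):
       # Hypothetical site extension: only the per-partition core counts
       # change; all hooks are inherited from the base test
       cores = {
           'mycluster:compute': 64   # made-up partition and core count
       }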