diff --git a/docs/_static/img/test-deps-by-case.svg b/docs/_static/img/test-deps-by-case.svg new file mode 100644 index 0000000000..3f12db930f --- /dev/null +++ b/docs/_static/img/test-deps-by-case.svg @@ -0,0 +1,3 @@ + + +
T0, P0, E0
T0, P0, E0
T0, P0, E1
T0, P0, E1
T0
T0
T0, P1, E0
T0, P1, E0
T0, P1, E1
T0, P1, E1
T1, P0, E0
T1, P0, E0
T1, P0, E1
T1, P0, E1
T1
T1
T1, P1, E0
T1, P1, E0
T1, P1, E1
T1, P1, E1
by_case
by_case
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/_static/img/test-deps-by-env.svg b/docs/_static/img/test-deps-by-env.svg index 54b6a25cf2..37588b0cff 100644 --- a/docs/_static/img/test-deps-by-env.svg +++ b/docs/_static/img/test-deps-by-env.svg @@ -1,3 +1,3 @@ -
T0,E0
T0,E0
T0,E1
T0,E1
T1,E0
T1,E0
T1,E1
T1,E1
T0
T0
T1
T1
\ No newline at end of file +
T0, P0, E0
T0, P0, E0
T0, P0, E1
T0, P0, E1
T0
T0
T0, P1, E0
T0, P1, E0
T0, P1, E1
T0, P1, E1
T1, P0, E0
T1, P0, E0
T1, P0, E1
T1, P0, E1
T1
T1
T1, P1, E0
T1, P1, E0
T1, P1, E1
T1, P1, E1
by_env
by_env
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/_static/img/test-deps-by-part.svg b/docs/_static/img/test-deps-by-part.svg new file mode 100644 index 0000000000..25fd88d072 --- /dev/null +++ b/docs/_static/img/test-deps-by-part.svg @@ -0,0 +1,3 @@ + + +
T0, P0, E0
T0, P0, E0
T0, P0, E1
T0, P0, E1
T0
T0
T0, P1, E0
T0, P1, E0
T0, P1, E1
T0, P1, E1
T1, P0, E0
T1, P0, E0
T1, P0, E1
T1, P0, E1
T1
T1
T1, P1, E0
T1, P1, E0
T1, P1, E1
T1, P1, E1
by_part
by_part
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/_static/img/test-deps-by-xcase.svg b/docs/_static/img/test-deps-by-xcase.svg new file mode 100644 index 0000000000..9780d097f7 --- /dev/null +++ b/docs/_static/img/test-deps-by-xcase.svg @@ -0,0 +1,3 @@ + + +
T0, P0, E0
T0, P0, E0
T0, P0, E1
T0, P0, E1
T0
T0
T0, P1, E0
T0, P1, E0
T0, P1, E1
T0, P1, E1
T1, P0, E0
T1, P0, E0
T1, P0, E1
T1, P0, E1
T1
T1
T1, P1, E0
T1, P1, E0
T1, P1, E1
T1, P1, E1
by_xcase
by_xcase
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/_static/img/test-deps-by-xenv.svg b/docs/_static/img/test-deps-by-xenv.svg new file mode 100644 index 0000000000..68c4accd54 --- /dev/null +++ b/docs/_static/img/test-deps-by-xenv.svg @@ -0,0 +1,3 @@ + + +
T0, P0, E0
T0, P0, E0
T0, P0, E1
T0, P0, E1
T0
T0
T0, P1, E0
T0, P1, E0
T0, P1, E1
T0, P1, E1
T1, P0, E0
T1, P0, E0
T1, P0, E1
T1, P0, E1
T1
T1
T1, P1, E0
T1, P1, E0
T1, P1, E1
T1, P1, E1
by_xenv
by_xenv
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/_static/img/test-deps-by-xpart.svg b/docs/_static/img/test-deps-by-xpart.svg new file mode 100644 index 0000000000..d60e6e9c3f --- /dev/null +++ b/docs/_static/img/test-deps-by-xpart.svg @@ -0,0 +1,3 @@ + + +
T0, P0, E0
T0, P0, E0
T0, P0, E1
T0, P0, E1
T0
T0
T0, P1, E0
T0, P1, E0
T0, P1, E1
T0, P1, E1
T1, P0, E0
T1, P0, E0
T1, P0, E1
T1, P0, E1
T1
T1
T1, P1, E0
T1, P1, E0
T1, P1, E1
T1, P1, E1
by_xpart
by_xpart
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/_static/img/test-deps-custom.svg b/docs/_static/img/test-deps-custom.svg new file mode 100644 index 0000000000..d8ccc49d12 --- /dev/null +++ b/docs/_static/img/test-deps-custom.svg @@ -0,0 +1,3 @@ + + +
T0, P0, E0
T0, P0, E0
T0, P0, E1
T0, P0, E1
T0
T0
T0, P1, E0
T0, P1, E0
T0, P1, E1
T0, P1, E1
T1, P0, E0
T1, P0, E0
T1, P0, E1
T1, P0, E1
T1
T1
T1, P1, E0
T1, P1, E0
T1, P1, E1
T1, P1, E1
custom
custom
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/_static/img/test-deps-cycle.svg b/docs/_static/img/test-deps-cycle.svg index 35a15fd9ff..f309764de7 100644 --- a/docs/_static/img/test-deps-cycle.svg +++ b/docs/_static/img/test-deps-cycle.svg @@ -1,3 +1,3 @@ -
T0,E0
T0,E0
T0,E1
T0,E1
T1,E0
T1,E0
T1,E1
T1,E1
T0
T0
T1
T1
X
X
\ No newline at end of file +
T0, P0, E0
T0, P0, E0
T0, P0, E1
T0, P0, E1
T0
T0
T1, P0, E0
T1, P0, E0
T1, P0, E1
T1, P0, E1
T1
T1
X
X
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/_static/img/test-deps-dangling.svg b/docs/_static/img/test-deps-dangling.svg deleted file mode 100644 index 544dc80478..0000000000 --- a/docs/_static/img/test-deps-dangling.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
T0,E0
T0,E0
T1,E0
T1,E0
T1,E1
T1,E1
T0
T0
T1
T1
X
X
\ No newline at end of file diff --git a/docs/_static/img/test-deps-exact.svg b/docs/_static/img/test-deps-exact.svg deleted file mode 100644 index 1ad602664e..0000000000 --- a/docs/_static/img/test-deps-exact.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
T0,E0
T0,E0
T0,E1
T0,E1
T1,E0
T1,E0
T1,E1
T1,E1
T0
T0
T1
T1
\ No newline at end of file diff --git a/docs/_static/img/test-deps-fully.svg b/docs/_static/img/test-deps-fully.svg index 92fbdb8891..83ed057454 100644 --- a/docs/_static/img/test-deps-fully.svg +++ b/docs/_static/img/test-deps-fully.svg @@ -1,3 +1,3 @@ -
T0,E0
T0,E0
T0,E1
T0,E1
T1,E0
T1,E0
T1,E1
T1,E1
T0
T0
T1
T1
\ No newline at end of file +
T0, P0, E0
T0, P0, E0
T0, P0, E1
T0, P0, E1
T0
T0
T0, P1, E0
T0, P1, E0
T0, P1, E1
T0, P1, E1
T1, P0, E0
T1, P0, E0
T1, P0, E1
T1, P0, E1
T1
T1
T1, P1, E0
T1, P1, E0
T1, P1, E1
T1, P1, E1
fully
fully
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/docs/dependencies.rst b/docs/dependencies.rst index 1c79a26133..29ee052794 100644 --- a/docs/dependencies.rst +++ b/docs/dependencies.rst @@ -6,15 +6,25 @@ Dependencies in ReFrame are defined at the test level using the :func:`depends_o We will see the rules of that projection in a while. The dependency graph construction and the subsequent dependency analysis happen also at the level of the test cases. -Let's assume that test :class:`T1` depends in :class:`T0`. +Let's assume that test :class:`T1` depends on :class:`T0`. This can be expressed inside :class:`T1` using the :func:`depends_on` method: .. code:: python + @rfm.simple_test + class T0(rfm.RegressionTest): + def __init__(self): + ... + self.valid_systems = ['P0', 'P1'] + self.valid_prog_environs = ['E0', 'E1'] + + @rfm.simple_test class T1(rfm.RegressionTest): def __init__(self): ... + self.valid_systems = ['P0', 'P1'] + self.valid_prog_environs = ['E0', 'E1'] self.depends_on('T0') Conceptually, this dependency can be viewed at the test level as follows: @@ -22,18 +32,20 @@ Conceptually, this dependency can be viewed at the test level as follows: .. figure:: _static/img/test-deps.svg :align: center - :alt: Simple test dependency presented conceptually. + + :sub:`Simple test dependency presented conceptually.` For most of the cases, this is sufficient to reason about test dependencies. In reality, as mentioned above, dependencies are handled at the level of test cases. -Test cases on different partitions are always independent. -If not specified differently, test cases using programming environments are also independent. +If not specified differently, test cases on different partitions or programming environments are independent. This is the default behavior of the :func:`depends_on` function. 
-The following image shows the actual test case dependencies assuming that both tests support the ``E0`` and ``E1`` programming environments (for simplicity, we have omitted the partitions, since tests are always independent in that dimension): +The following image shows the actual test case dependencies of the two tests above: -.. figure:: _static/img/test-deps-by-env.svg +.. figure:: _static/img/test-deps-by-case.svg :align: center - :alt: Test case dependencies by environment (default). + + :sub:`Test case dependencies partitioned by case (default).` + This means that test cases of :class:`T1` may start executing before all test cases of :class:`T0` have finished. You can impose a stricter dependency between tests, such that :class:`T1` does not start execution unless all test cases of :class:`T0` have finished. @@ -41,99 +53,141 @@ You can achieve this as follows: .. code:: python + import reframe.utility.udeps as udeps + + @rfm.simple_test class T1(rfm.RegressionTest): def __init__(self): ... - self.depends_on('T0', how=rfm.DEPEND_FULLY) + self.depends_on('T0', how=udeps.fully) -This will create the following test case graph: + +This will create a fully connected graph between the test cases of the two tests as it is shown in the following figure: .. figure:: _static/img/test-deps-fully.svg :align: center - :alt: Fully dependent test cases. + + :sub:`Fully dependent test cases.` + + +There are more ways in which the test case subgraph can be split than the two extremes we presented so far. +The following figures show the different splittings. -You may also create arbitrary dependencies between the test cases of different tests, like in the following example, where the dependencies cannot be represented in any of the other two ways: +Split by partition +------------------ -.. figure:: _static/img/test-deps-exact.svg +The test cases are split in fully connected components per partition. +Test cases from different partitions are independent. + +.. 
figure:: _static/img/test-deps-by-part.svg :align: center - :alt: Arbitrary test cases dependencies -These dependencies can be achieved as follows: + :sub:`Test case dependencies partitioned by partition.` -.. code:: python - @rfm.simple_test - class T1(rfm.RegressionTest): - def __init__(self): - ... - self.depends_on('T0', how=rfm.DEPEND_EXACT, - subdeps={'E0': ['E0', 'E1'], 'E1': ['E1']}) +Split by environment +-------------------- -The ``subdeps`` argument defines the sub-dependencies between the test cases of :class:`T1` and :class:`T0` using an adjacency list representation. +The test cases are split in fully connected components per environment. +Test cases from different environments are independent. +.. figure:: _static/img/test-deps-by-env.svg + :align: center -Cyclic dependencies -------------------- + :sub:`Test case dependencies partitioned by environment.` -Obviously, cyclic dependencies between test cases are not allowed. -Cyclic dependencies between tests are not allowed either, even if the test case dependency graph is acyclic. -For example, the following dependency set up is invalid: -.. figure:: _static/img/test-deps-cycle.svg +Split by exclusive partition +---------------------------- + +The test cases are split in fully connected components that do not contain the same partition. +Test cases from the same partition are independent. + +.. figure:: _static/img/test-deps-by-xpart.svg :align: center - :alt: Any cyclic dependencies between tests are not allowed, even if the underlying test case dependencies are not forming a cycle. -The test case dependencies here, clearly, do not form a cycle, but the edge from ``(T0, E0)`` to ``(T1, E1)`` introduces a dependency from ``T0`` to ``T1`` forming a cycle at the test level. -The reason we impose this restriction is that we wanted to keep the original processing of tests by ReFrame, where all the test cases of a test are processed before moving to the next one. 
-Supporting this type of dependencies would require to change substantially ReFrame's output. + :sub:`Test case dependencies partitioned by exclusive partition.` -Dangling dependencies ---------------------- +Split by exclusive environment +------------------------------ -In our discussion so far, :class:`T0` and :class:`T1` had the same valid programming environments. -What happens if they do not? -Assume, for example, that :class:`T0` and :class:`T1` are defined as follows: +The test cases are split in fully connected components that do not contain the same environment. +Test cases from the same environment are independent. -.. code:: python +.. figure:: _static/img/test-deps-by-xenv.svg + :align: center - import reframe as rfm - import reframe.utility.sanity as sn + :sub:`Test case dependencies partitioned by exclusive environment.` - @rfm.simple_test - class T0(rfm.RegressionTest): - def __init__(self): - self.valid_systems = ['P0'] - self.valid_prog_environs = ['E0'] - ... +Split by exclusive case +----------------------- + +The test cases are split in fully connected components that do not contain the same environment and the same partition. +Test cases from the same environment and the same partition are independent. + +.. figure:: _static/img/test-deps-by-xcase.svg + :align: center + + :sub:`Test case dependencies partitioned by exclusive case.` + + + +Custom splits +------------- + +Users may define custom dependency patterns by supplying their own ``how`` function. +The ``how`` argument accepts a :py:class:`callable` which takes as arguments the source and destination of a possible edge in the test case subgraph. +If the callable returns :class:`True`, then ReFrame will place an edge (i.e., a dependency) otherwise not. +The following code will create dependencies only if the source partition is ``P0`` and the destination environment is ``E1``: + +.. 
code:: python + + def myway(src, dst): + psrc, esrc = src + pdst, edst = dst + return psrc == 'P0' and edst == 'E1' @rfm.simple_test class T1(rfm.RegressionTest): def __init__(self): - self.valid_systems = ['P0'] - self.valid_prog_environs = ['E0', 'E1'] - self.depends_on('T0') ... + self.depends_on('T0', how=myway) + + +This corresponds to the following test case dependency subgraph: + + +.. figure:: _static/img/test-deps-custom.svg + :align: center + + :sub:`Custom test case dependencies.` + + +Notice how all the rest of the test cases are completely independent. -As discussed previously, :func:`depends_on` will create one-to-one dependencies between the different programming environment test cases. -So in this case it will try to create an edge from ``(T1, E1)`` to ``(T0, E1)`` as shown below: -.. figure:: _static/img/test-deps-dangling.svg
-The remedy to this is to use either ``DEPEND_FULLY`` or pass the exact dependencies with ``DEPEND_EXACT`` to :func:`depends_on`. +.. note:: + Technically, the framework could easily support such types of dependencies, but ReFrame's output would have to change substantially. -If :class:`T0` and :class:`T1` had their :attr:`valid_prog_environs` swapped, such that :class:`T0` supported ``E0`` and ``E1`` and :class:`T1` supported only ``E0``, -the default :func:`depends_on` mode would work fine. -The ``(T0, E1)`` test case would simply have no dependent test cases. Resolving dependencies @@ -143,7 +197,7 @@ As shown in the :doc:`tutorial_deps`, test dependencies would be of limited usag Let's reiterate over the :func:`set_executable` function of the :class:`OSULatencyTest` that we presented previously: .. literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 32-38 + :lines: 37-43 The ``@require_deps`` decorator does some magic -- we will unravel this shortly -- with the function arguments of the :func:`set_executable` function and binds them to the target test dependencies by their name. However, as discussed in this section, dependencies are defined at test case level, so the ``OSUBuildTest`` function argument is bound to a special function that allows you to retrieve an actual test case of the target dependency. diff --git a/docs/tutorial_deps.rst b/docs/tutorial_deps.rst index 59ede7272b..24992768f7 100644 --- a/docs/tutorial_deps.rst +++ b/docs/tutorial_deps.rst @@ -7,34 +7,29 @@ Tutorial 2: Using Dependencies in ReFrame Tests A ReFrame test may define dependencies to other tests. An example scenario is to test different runtime configurations of a benchmark that you need to compile, or run a scaling analysis of a code. -In such cases, you don't want to rebuild your test for each runtime configuration. -You could have a build test, which all runtime tests would depend on. 
+In such cases, you don't want to download and rebuild your test for each runtime configuration. +You could have a test where only the sources are fetched, and which all build tests would depend on. +And, similarly, all the runtime tests would depend on their corresponding build test. This is the approach we take with the following example, that fetches, builds and runs several `OSU benchmarks `__. -We first create a basic compile-only test, that fetches the benchmarks and builds them for the different programming environments: +We first create a basic run-only test, that fetches the benchmarks: .. literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 92-106 + :lines: 112-123 -There is nothing particular to that test, except perhaps that you can set :attr:`sourcesdir ` to ``None`` even for a test that needs to compile something. -In such a case, you should at least provide the commands that fetch the code inside the :attr:`prebuild_cmds ` attribute. - -For the next test we need to use the OSU benchmark binaries that we just built, so as to run the MPI ping-pong benchmark. -Here is the relevant part: +This test doesn't need any specific programming environment, so we simply pick the `gnu` environment in the `login` partition. +The build tests would then copy the benchmark code and build it for the different programming environments: .. literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 12-44 - -First, since we will have multiple similar benchmarks, we move all the common functionality to the :class:`OSUBenchmarkTestBase` base class. -Again nothing new here; we are going to use two nodes for the benchmark and we set :attr:`sourcesdir ` to ``None``, since none of the benchmark tests will use any additional resources. -The new part comes in with the :class:`OSULatencyTest` test in the following line: + :lines: 93-109 +The only new thing that comes in with the :class:`OSUBuildTest` test is the following line: .. 
literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 32 + :lines: 99 -Here we tell ReFrame that this test depends on a test named ``OSUBuildTest``. +Here we tell ReFrame that this test depends on a test named ``OSUDownloadTest``. This test may or may not be defined in the same test file; all ReFrame needs is the test name. -By default, the :func:`depends_on() ` function will create dependencies between the individual test cases of the :class:`OSULatencyTest` and the :class:`OSUBuildTest`, such that the :class:`OSULatencyTest` using ``PrgEnv-gnu`` will depend on the outcome of the :class:`OSUBuildTest` using ``PrgEnv-gnu``, but not on the outcome of the :class:`OSUBuildTest` using ``PrgEnv-intel``. +The :func:`depends_on() ` function will create dependencies between the individual test cases of the :class:`OSUBuildTest` and the :class:`OSUDownloadTest`, such that all the test cases of :class:`OSUBuildTest` will depend on the outcome of the :class:`OSUDownloadTest`. This behaviour can be changed, but it is covered in detail in :doc:`dependencies`. You can create arbitrary test dependency graphs, but they need to be acyclic. If ReFrame detects cyclic dependencies, it will refuse to execute the set of tests and will issue an error pointing out the cycle. @@ -42,27 +37,51 @@ If ReFrame detects cyclic dependencies, it will refuse to execute the set of tes A ReFrame test with dependencies will execute, i.e., enter its `setup` stage, only after `all` of its dependencies have succeeded. If any of its dependencies fails, the current test will be marked as failure as well. -The next step for the :class:`OSULatencyTest` is to set its executable to point to the binary produced by the :class:`OSUBuildTest`. +The next step for the :class:`OSUBuildTest` is to set its sourcesdir to point to the source code that was fetched by the :class:`OSUDownloadTest`. This is achieved with the following specially decorated function: .. 
literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 37-43 + :lines: 104-109 The :func:`@require_deps ` decorator will bind the arguments passed to the decorated function to the result of the dependency that each argument names. -In this case, it binds the ``OSUBuildTest`` function argument to the result of a dependency named ``OSUBuildTest``. +In this case, it binds the ``OSUDownloadTest`` function argument to the result of a dependency named ``OSUDownloadTest``. In order for the binding to work correctly the function arguments must be named after the target dependencies. However, referring to a dependency only by the test's name is not enough, since a test might be associated with multiple programming environments. For this reason, a dependency argument is actually bound to a function that accepts as argument the name of a target programming environment. -If no arguments are passed to that function, as in this example, the current programming environment is implied, such that ``OSUBuildTest()`` is equivalent to ``OSUBuildTest(self.current_environ.name)``. +If no arguments are passed to that function, as in this example, the current programming environment and partition are implied, such that ``OSUDownloadTest()`` is equivalent to ``OSUDownloadTest(self.current_environ.name, self.current_partition.name)``. This call returns the actual test case of the dependency that has been executed. -This allows you to access any attribute from the target test, as we do in this example by accessing the target test's stage directory, which we use to construct the path of the executable. +This allows you to access any attribute from the target test, as we do in this example by accessing the target test's stage directory, which we use to construct the sourcesdir of the test. + +For the next test we need to use the OSU benchmark binaries that we just built, so as to run the MPI ping-pong benchmark. +Here is the relevant part: + +.. 
literalinclude:: ../tutorials/deps/osu_benchmarks.py + :lines: 13-44 + +First, since we will have multiple similar benchmarks, we move all the common functionality to the :class:`OSUBenchmarkTestBase` base class. +Again nothing new here; we are going to use two nodes for the benchmark and we set :attr:`sourcesdir ` to ``None``, since none of the benchmark tests will use any additional resources. +As done previously, we define the dependencies with the following line: + +.. literalinclude:: ../tutorials/deps/osu_benchmarks.py + :lines: 33 + +Here we tell ReFrame that this test depends on a test named ``OSUBuildTest`` "by environment." +This means that the test cases of this test will only depend on the test cases of the ``OSUBuildTest`` that use the same environment; +partitions may be different. + +The next step for the :class:`OSULatencyTest` is to set its executable to point to the binary produced by the :class:`OSUBuildTest`. +This is achieved with the following specially decorated function: + +.. literalinclude:: ../tutorials/deps/osu_benchmarks.py + :lines: 38-44 + + This concludes the presentation of the :class:`OSULatencyTest` test. The :class:`OSUBandwidthTest` is completely analogous. The :class:`OSUAllreduceTest` shown below is similar to the other two, except that it is parameterized. It is essentially a scalability test that is running the ``osu_allreduce`` executable created by the :class:`OSUBuildTest` for 2, 4, 8 and 16 nodes. .. literalinclude:: ../tutorials/deps/osu_benchmarks.py - :lines: 69-89 + :lines: 70-90 The full set of OSU example tests is shown below: @@ -77,22 +96,29 @@ Here is the output when running the OSU tests with the asynchronous execution po .. 
code-block:: none [ReFrame Setup] - version: 3.3-dev0 (rev: cb974c13) - command: './bin/reframe -C tutorials/config/settings.py -c tutorials/deps/osu_benchmarks.py -r' - launched by: user@dom101 - working directory: '/users/user/Devel/reframe' + version: 3.3-dev1 (rev: 734a53df) + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -C tutorials/config/settings.py -r' + launched by: user@daint103 + working directory: '/path/to/reframe' settings file: 'tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + check search path: '/path/to/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/path/to/reframe/stage' + output directory: '/path/to/reframe/output' + + [==========] Running 8 check(s) + [==========] Started on Tue Nov 3 13:20:28 2020 - [==========] Running 7 check(s) - [==========] Started on Mon Oct 12 20:19:40 2020 + [----------] started processing OSUDownloadTest (OSU benchmarks download sources) + [ RUN ] OSUDownloadTest on daint:login using gnu + [----------] finished processing OSUDownloadTest (OSU benchmarks download sources) [----------] started processing OSUBuildTest (OSU benchmarks build test) - [ RUN ] OSUBuildTest on daint:gpu using gnu - [ RUN ] OSUBuildTest on daint:gpu using intel - [ RUN ] OSUBuildTest on daint:gpu using pgi + [ RUN ] OSUBuildTest on daint:login using gnu + [ DEP ] OSUBuildTest on daint:login using gnu + [ RUN ] OSUBuildTest on daint:login using intel + [ DEP ] OSUBuildTest on daint:login using intel + [ RUN ] OSUBuildTest on daint:login using pgi + [ DEP ] OSUBuildTest on daint:login using pgi [----------] finished processing OSUBuildTest (OSU benchmarks build test) [----------] started processing OSULatencyTest (OSU latency test) @@ -150,32 +176,33 @@ Here is the output when running the OSU tests with the asynchronous execution po [----------] 
finished processing OSUAllreduceTest_16 (OSU Allreduce test) [----------] waiting for spawned checks to finish - [ OK ] ( 1/21) OSUBuildTest on daint:gpu using pgi [compile: 28.225s run: 0.040s total: 28.277s] - [ OK ] ( 2/21) OSUBuildTest on daint:gpu using gnu [compile: 21.495s run: 66.686s total: 88.208s] - [ OK ] ( 3/21) OSUBuildTest on daint:gpu using intel [compile: 38.376s run: 37.468s total: 75.855s] - [ OK ] ( 4/21) OSUAllreduceTest_16 on daint:gpu using pgi [compile: 0.005s run: 14.180s total: 14.197s] - [ OK ] ( 5/21) OSUAllreduceTest_16 on daint:gpu using gnu [compile: 0.008s run: 17.997s total: 18.736s] - [ OK ] ( 6/21) OSUAllreduceTest_4 on daint:gpu using pgi [compile: 0.007s run: 18.581s total: 21.528s] - [ OK ] ( 7/21) OSUAllreduceTest_2 on daint:gpu using pgi [compile: 0.005s run: 45.562s total: 49.983s] - [ OK ] ( 8/21) OSUAllreduceTest_8 on daint:gpu using pgi [compile: 0.006s run: 49.313s total: 50.789s] - [ OK ] ( 9/21) OSUAllreduceTest_8 on daint:gpu using gnu [compile: 0.006s run: 48.884s total: 51.096s] - [ OK ] (10/21) OSUAllreduceTest_4 on daint:gpu using gnu [compile: 0.007s run: 48.169s total: 51.854s] - [ OK ] (11/21) OSULatencyTest on daint:gpu using pgi [compile: 0.006s run: 53.398s total: 60.785s] - [ OK ] (12/21) OSUAllreduceTest_2 on daint:gpu using gnu [compile: 0.005s run: 56.394s total: 61.531s] - [ OK ] (13/21) OSULatencyTest on daint:gpu using gnu [compile: 0.005s run: 55.499s total: 63.628s] - [ OK ] (14/21) OSUAllreduceTest_2 on daint:gpu using intel [compile: 0.006s run: 67.665s total: 70.079s] - [ OK ] (15/21) OSUAllreduceTest_16 on daint:gpu using intel [compile: 0.005s run: 73.259s total: 73.275s] - [ OK ] (16/21) OSULatencyTest on daint:gpu using intel [compile: 0.006s run: 97.960s total: 101.936s] - [ OK ] (17/21) OSUAllreduceTest_8 on daint:gpu using intel [compile: 0.006s run: 101.123s total: 101.933s] - [ OK ] (18/21) OSUAllreduceTest_4 on daint:gpu using intel [compile: 0.007s run: 100.592s total: 102.215s] - [ OK 
] (19/21) OSUBandwidthTest on daint:gpu using pgi [compile: 0.005s run: 117.530s total: 123.408s] - [ OK ] (20/21) OSUBandwidthTest on daint:gpu using gnu [compile: 0.005s run: 117.174s total: 123.765s] - [ OK ] (21/21) OSUBandwidthTest on daint:gpu using intel [compile: 0.005s run: 160.484s total: 163.680s] + [ OK ] ( 1/22) OSUDownloadTest on daint:login using gnu [compile: 0.005s run: 3.373s total: 3.408s] + [ OK ] ( 2/22) OSUBuildTest on daint:login using gnu [compile: 22.410s run: 0.035s total: 87.728s] + [ OK ] ( 3/22) OSUBuildTest on daint:login using pgi [compile: 27.725s run: 59.918s total: 87.691s] + [ OK ] ( 4/22) OSUBuildTest on daint:login using intel [compile: 37.437s run: 32.771s total: 98.034s] + [ OK ] ( 5/22) OSUAllreduceTest_2 on daint:gpu using pgi [compile: 0.007s run: 139.339s total: 144.402s] + [ OK ] ( 6/22) OSUAllreduceTest_4 on daint:gpu using pgi [compile: 0.007s run: 140.896s total: 144.395s] + [ OK ] ( 7/22) OSUAllreduceTest_8 on daint:gpu using pgi [compile: 0.006s run: 142.451s total: 144.372s] + [ OK ] ( 8/22) OSUAllreduceTest_16 on daint:gpu using pgi [compile: 0.005s run: 144.342s total: 144.368s] + [ OK ] ( 9/22) OSUAllreduceTest_4 on daint:gpu using gnu [compile: 0.007s run: 140.555s total: 144.833s] + [ OK ] (10/22) OSUAllreduceTest_16 on daint:gpu using gnu [compile: 0.005s run: 143.642s total: 144.778s] + [ OK ] (11/22) OSUAllreduceTest_8 on daint:gpu using gnu [compile: 0.007s run: 142.456s total: 145.151s] + [ OK ] (12/22) OSUAllreduceTest_2 on daint:gpu using gnu [compile: 0.005s run: 139.685s total: 145.510s] + [ OK ] (13/22) OSUBandwidthTest on daint:gpu using gnu [compile: 0.009s run: 193.440s total: 200.818s] + [ OK ] (14/22) OSUBandwidthTest on daint:gpu using pgi [compile: 0.006s run: 194.465s total: 201.080s] + [ OK ] (15/22) OSULatencyTest on daint:gpu using intel [compile: 0.009s run: 278.603s total: 283.389s] + [ OK ] (16/22) OSUAllreduceTest_4 on daint:gpu using intel [compile: 0.006s run: 281.112s total: 
283.365s] + [ OK ] (17/22) OSULatencyTest on daint:gpu using pgi [compile: 0.006s run: 285.499s total: 293.712s] + [ OK ] (18/22) OSUAllreduceTest_2 on daint:gpu using intel [compile: 0.006s run: 280.693s total: 283.756s] + [ OK ] (19/22) OSUAllreduceTest_8 on daint:gpu using intel [compile: 0.006s run: 282.550s total: 283.971s] + [ OK ] (20/22) OSUAllreduceTest_16 on daint:gpu using intel [compile: 0.005s run: 284.573s total: 284.596s] + [ OK ] (21/22) OSULatencyTest on daint:gpu using gnu [compile: 0.006s run: 286.186s total: 295.202s] + [ OK ] (22/22) OSUBandwidthTest on daint:gpu using intel [compile: 0.005s run: 340.005s total: 343.927s] [----------] all spawned checks have finished - [ PASSED ] Ran 21 test case(s) from 7 check(s) (0 failure(s)) - [==========] Finished on Mon Oct 12 20:24:02 2020 - Log file(s) saved in: '/tmp/rfm-m5zww8le.log' + [ PASSED ] Ran 22 test case(s) from 8 check(s) (0 failure(s)) + [==========] Finished on Tue Nov 3 13:27:54 2020 + Log file(s) saved in: '/tmp/rfm-n4lrqiqf.log' Before starting running the tests, ReFrame topologically sorts them based on their dependencies and schedules them for running using the selected execution policy. With the serial execution policy, ReFrame simply executes the tests to completion as they "arrive", since the tests are already topologically sorted. diff --git a/docs/utility_functions_reference.rst b/docs/utility_functions_reference.rst index 82c617031c..068a51ac00 100644 --- a/docs/utility_functions_reference.rst +++ b/docs/utility_functions_reference.rst @@ -31,3 +31,13 @@ Type Checking Utilities .. automodule:: reframe.utility.typecheck :members: :show-inheritance: + + +.. _test-case-deps-management: + +Test Case Dependencies Management +--------------------------------- + +.. 
automodule:: reframe.utility.udeps + :members: + :show-inheritance: diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 2276c7dd64..f794a99e98 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -29,6 +29,7 @@ import reframe.utility.osext as osext import reframe.utility.sanity as sn import reframe.utility.typecheck as typ +import reframe.utility.udeps as udeps from reframe.core.backends import (getlauncher, getscheduler) from reframe.core.buildsystems import BuildSystemField from reframe.core.containers import ContainerPlatform, ContainerPlatformField @@ -1593,51 +1594,119 @@ def cleanup(self, remove_files=False): def user_deps(self): return util.SequenceView(self._userdeps) - def depends_on(self, target, how=DEPEND_BY_ENV, subdeps=None): - '''Add a dependency to ``target`` in this test. - - :arg target: The name of the target test. - :arg how: How the dependency should be mapped in the test cases space. - This argument can accept any of the three constants - :attr:`DEPEND_EXACT`, :attr:`DEPEND_BY_ENV` (default), - :attr:`DEPEND_FULLY`. - - :arg subdeps: An adjacency list representation of how this test's test - cases depend on those of the target test. This is only relevant if - ``how == DEPEND_EXACT``. The value of this argument is a - dictionary having as keys the names of this test's supported - programming environments. The values are lists of the programming - environments names of the target test that this test's test cases - will depend on. 
In the following example, this test's ``E0`` - programming environment case will depend on both ``E0`` and ``E1`` - test cases of the target test ``T0``, but its ``E1`` case will - depend only on the ``E1`` test case of ``T0``: + def _depends_on_func(self, how, subdeps=None, *args, **kwargs): + if args or kwargs: + raise ValueError('invalid arguments passed') + + user_deprecation_warning("passing 'how' as an integer or passing " + "'subdeps' is deprecated; please have a " + "look at the user documentation") + + if (subdeps is not None and + not isinstance(subdeps, typ.Dict[str, typ.List[str]])): + raise TypeError("subdeps argument must be of type " + "`Dict[str, List[str]]' or `None'") + + # Now return a proper when function + def exact(src, dst): + if not subdeps: + return False + + p0, e0 = src + p1, e1 = dst + + # DEPEND_EXACT allows dependencies inside the same partition + return ((p0 == p1) and (e0 in subdeps) and (e1 in subdeps[e0])) + + # Follow the old definitions + # DEPEND_BY_ENV used to mean same env and same partition + if how == DEPEND_BY_ENV: + return udeps.by_case + # DEPEND_BY_ENV used to mean same partition + elif how == DEPEND_FULLY: + return udeps.by_part + elif how == DEPEND_EXACT: + return exact + else: + raise ValueError(f"unknown value passed to 'how' argument: {how}") + + def depends_on(self, target, how=None, *args, **kwargs): + '''Add a dependency to another test. + + :arg target: The name of the test that this one will depend on. + :arg how: A callable that defines how the test cases of this test + depend on the the test cases of the target test. + This callable should accept two arguments: + + - The source test case (i.e., a test case of this test) + represented as a two-element tuple containing the names of the + partition and the environment of the current test case. 
+ - Test destination test case (i.e., a test case of the target + test) represented as a two-element tuple containing the names of + the partition and the environment of the current target test + case. + + It should return :class:`True` if a dependency between the source + and destination test cases exists, :class:`False` otherwise. + + This function will be called multiple times by the framework when + the test DAG is constructed, in order to determine the + connectivity of the two tests. + + In the following example, this test depends on ``T1`` when their + partitions match, otherwise their test cases are independent. .. code-block:: python - self.depends_on('T0', how=rfm.DEPEND_EXACT, - subdeps={'E0': ['E0', 'E1'], 'E1': ['E1']}) + def by_part(src, dst): + p0, _ = src + p1, _ = dst + return p0 == p1 + + self.depends_on('T0', how=by_part) + + The framework offers already a set of predefined relations between + the test cases of inter-dependent tests. See the + :mod:`reframe.utility.udeps` for more details. + + The default ``how`` function is + :func:`reframe.utility.udeps.by_case`, where test cases on + different partitions and environments are independent. + + .. seealso:: + - :doc:`dependencies` + - :ref:`test-case-deps-management` + - For more details on how test dependencies work in ReFrame, please - refer to `How Test Dependencies Work In ReFrame `__. .. versionadded:: 2.21 + .. versionchanged:: 3.3 + Dependencies between test cases from different partitions are now + allowed. The ``how`` argument now accepts a callable. + + .. deprecated:: 3.3 + Passing an integer to the ``how`` argument as well as using the + ``subdeps`` argument is deprecated. 
+ ''' if not isinstance(target, str): raise TypeError("target argument must be of type: `str'") - if not isinstance(how, int): - raise TypeError("how argument must be of type: `int'") + if (isinstance(how, int)): + # We are probably using the old syntax; try to get a + # proper how function + how = self._depends_on_func(how, *args, **kwargs) - if (subdeps is not None and - not isinstance(subdeps, typ.Dict[str, typ.List[str]])): - raise TypeError("subdeps argument must be of type " - "`Dict[str, List[str]]' or `None'") + if how is None: + how = udeps.by_case + + if not callable(how): + raise TypeError("'how' argument must be callable") - self._userdeps.append((target, how, subdeps)) + self._userdeps.append((target, how)) - def getdep(self, target, environ=None): + def getdep(self, target, environ=None, part=None): '''Retrieve the test case of a target dependency. This is a low-level method. The :func:`@require_deps @@ -1660,15 +1729,20 @@ def getdep(self, target, environ=None): if environ is None: environ = self.current_environ.name + if part is None: + part = self.current_partition.name + if self._case is None or self._case() is None: raise DependencyError('no test case is associated with this test') for d in self._case().deps: - if d.check.name == target and d.environ.name == environ: + if (d.check.name == target and + d.environ.name == environ and + d.partition.name == part): return d.check - raise DependencyError('could not resolve dependency to (%s, %s)' % - (target, environ)) + raise DependencyError(f'could not resolve dependency to ({target!r}, ' + f'{part!r}, {environ!r})') def __str__(self): return "%s(name='%s', prefix='%s')" % (type(self).__name__, diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index 3e0c4839fc..d356354d54 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -23,7 +23,7 @@ import reframe.core.warnings as warnings import reframe.frontend.argparse as argparse import reframe.frontend.check_filters as filters 
-import reframe.frontend.dependency as dependency +import reframe.frontend.dependencies as dependencies import reframe.utility.jsonext as jsonext import reframe.utility.osext as osext from reframe.frontend.executors import Runner, generate_testcases @@ -640,9 +640,9 @@ def print_infoline(param, value): options.skip_system_check, options.skip_prgenv_check, allowed_environs) - testgraph = dependency.build_deps(testcases) - dependency.validate_deps(testgraph) - testcases = dependency.toposort(testgraph) + testgraph = dependencies.build_deps(testcases) + dependencies.validate_deps(testgraph) + testcases = dependencies.toposort(testgraph) # Manipulate ReFrame's environment if site_config.get('general/0/purge_environment'): diff --git a/reframe/frontend/dependency.py b/reframe/frontend/dependencies.py similarity index 71% rename from reframe/frontend/dependency.py rename to reframe/frontend/dependencies.py index 04f81c9003..f1e2f0af58 100644 --- a/reframe/frontend/dependency.py +++ b/reframe/frontend/dependencies.py @@ -24,39 +24,29 @@ def build_deps(cases, default_cases=None): ''' # Index cases for quick access - def build_partition_index(cases): - if cases is None: - return {} - - ret = {} - for c in cases: - cname, pname = c.check.name, c.partition.fullname - ret.setdefault((cname, pname), []) - ret[cname, pname].append(c) - - return ret - - def build_cases_index(cases): + def build_index(cases): if cases is None: return {} ret = {} for c in cases: cname = c.check.name - pname = c.partition.fullname - ename = c.environ.name - ret.setdefault((cname, pname, ename), c) + ret.setdefault(cname, []) + ret[cname].append(c) return ret - def resolve_dep(target, from_map, fallback_map, *args): - errmsg = 'could not resolve dependency: %s -> %s' % (target, args) + all_cases_map = build_index(cases) + default_cases_map = build_index(default_cases) + + def resolve_dep(src, dst): + errmsg = f'could not resolve dependency: {src!r} -> {dst!r}' try: - ret = from_map[args] + ret = 
all_cases_map[dst] except KeyError: # try to resolve the dependency in the fallback map try: - ret = fallback_map[args] + ret = default_cases_map[dst] except KeyError: raise DependencyError(errmsg) from None @@ -65,11 +55,6 @@ def resolve_dep(target, from_map, fallback_map, *args): return ret - cases_by_part = build_partition_index(cases) - cases_revmap = build_cases_index(cases) - default_cases_by_part = build_partition_index(default_cases) - default_cases_revmap = build_cases_index(default_cases) - # NOTE on variable names # # c stands for check or case depending on the context @@ -81,29 +66,15 @@ def resolve_dep(target, from_map, fallback_map, *args): # partitions and environments graph = collections.OrderedDict() for c in cases: - cname = c.check.name - pname = c.partition.fullname - ename = c.environ.name + psrc = c.partition.name + esrc = c.environ.name for dep in c.check.user_deps(): - tname, how, subdeps = dep - if how == rfm.DEPEND_FULLY: - c.deps.extend(resolve_dep(c, cases_by_part, - default_cases_by_part, tname, pname)) - elif how == rfm.DEPEND_BY_ENV: - c.deps.append( - resolve_dep(c, cases_revmap, default_cases_revmap, - tname, pname, ename) - ) - elif how == rfm.DEPEND_EXACT: - for env, tenvs in subdeps.items(): - if env != ename: - continue - - for te in tenvs: - c.deps.append( - resolve_dep(c, cases_revmap, default_cases_revmap, - tname, pname, te) - ) + tname, when = dep + for d in resolve_dep(c, tname): + pdst = d.partition.name + edst = d.environ.name + if when((psrc, esrc), (pdst, edst)): + c.deps.append(d) graph[c] = util.OrderedSet(c.deps) diff --git a/reframe/frontend/executors/__init__.py b/reframe/frontend/executors/__init__.py index 4b2b9accfe..2978b565f7 100644 --- a/reframe/frontend/executors/__init__.py +++ b/reframe/frontend/executors/__init__.py @@ -13,7 +13,7 @@ import reframe.core.environments as env import reframe.core.logging as logging import reframe.core.runtime as runtime -import reframe.frontend.dependency as dependency 
+import reframe.frontend.dependencies as dependencies from reframe.core.exceptions import (AbortTaskError, JobNotStartedError, ReframeForceExitError, TaskExit) from reframe.core.schedulers.local import LocalJobScheduler @@ -402,8 +402,8 @@ def _retry_failed(self, cases): # Clone failed cases and rebuild dependencies among them failed_cases = [t.testcase.clone() for t in failures] - cases_graph = dependency.build_deps(failed_cases, cases) - failed_cases = dependency.toposort(cases_graph, is_subgraph=True) + cases_graph = dependencies.build_deps(failed_cases, cases) + failed_cases = dependencies.toposort(cases_graph, is_subgraph=True) self._runall(failed_cases) failures = self._stats.failures() diff --git a/reframe/utility/udeps.py b/reframe/utility/udeps.py new file mode 100644 index 0000000000..147529c967 --- /dev/null +++ b/reframe/utility/udeps.py @@ -0,0 +1,166 @@ +# Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich) +# ReFrame Project Developers. See the top-level LICENSE file for details. +# +# SPDX-License-Identifier: BSD-3-Clause + +'''Managing the test case "micro-dependencies" between two tests. + +This module defines a set of basic functions that can be used with the ``how`` +argument of the :func:`reframe.core.pipeline.RegressionTest.depends_on` +function to control how the individual dependencies between the test cases of +two tests are formed. + +All functions take two arguments, the source and destination vertices of an +edge in the test case dependency subgraph that connects two tests. In the +relation *"T0 depends on T1"*, the source are the test cases of "T0" and the +destination are the test cases of "T1." The source and destination arguments +are two-element tuples containing the names of the partition and the +environment of the corresponding test cases. These functions return +:class:`True` if there is an edge connecting the two test cases or +:class:`False` otherwise. 
+ +A ``how`` function will be called by the framework multiple times when the +test DAG is built. More specifically, for each test dependency relation, it +will be called once for each test case combination of the two tests. + +The ``how`` functions essentially split the test case subgraph of two +dependent tests into fully connected components based on the values of their +supported partitions and environments. + +The :doc:`dependencies` page contains more information about test dependencies +and shows visually the test case subgraph connectivity that the different +``how`` functions described here achieve. + + +.. versionadded:: 3.3 + +''' + +import builtins + + +def fully(src, dst): + '''The test cases of two dependent tests will be fully connected.''' + + return True + + +def by_part(src, dst): + '''The test cases of two dependent tests will be split by partition. + + Test cases from different partitions are independent. + ''' + + return src[0] == dst[0] + + +def by_xpart(src, dst): + '''The test cases of two dependent tests will be split by the exclusive + disjunction (XOR) of their partitions. + + Test cases from the same partition are independent. + ''' + + return src[0] != dst[0] + + +def by_env(src, dst): + '''The test cases of two dependent tests will be split by environment. + + Test cases from different environments are independent. + ''' + + return src[1] == dst[1] + + +def by_xenv(src, dst): + '''The test cases of two dependent tests will be split by the exclusive + disjunction (XOR) of their environments. + + Test cases from the same environment are independent. + ''' + + return src[1] != dst[1] + + +def by_case(src, dst): + '''The test cases of two dependent tests will be split by partition and by + environment. + + Test cases from different partitions and different environments are + independent. 
+ ''' + + return src == dst + + +def by_xcase(src, dst): + '''The test cases of two dependent tests will be split by the exclusive + disjunction (XOR) of their partitions and environments. + + Test cases from the same environment and the same partition are + independent. + ''' + + return src != dst + + +# Undocumented 'how' functions +def part_is(name): + def _part_is(src, dst): + if src and dst: + return src[0] == name and dst[0] == name + + if src: + return src[0] == name + + if dst: + return dst[0] == name + + return False + + return _part_is + + +def env_is(name): + def _env_is(src, dst): + if src and dst: + return src[1] == name and dst[1] == name + + if src: + return src[1] == name + + if dst: + return dst[1] == name + + return False + + return _env_is + + +def source(fn): + def _source(src, dst): + return fn(src, None) + + return _source + + +def dest(fn): + def _dest(src, dst): + return fn(None, dst) + + return _dest + + +def any(*when_funcs): + def _any(src, dst): + return builtins.any(fn(src, dst) for fn in when_funcs) + + return _any + + +def all(*when_funcs): + def _all(src, dst): + return builtins.all(fn(src, dst) for fn in when_funcs) + + return _all diff --git a/tutorials/deps/osu_benchmarks.py b/tutorials/deps/osu_benchmarks.py index 25dea2d067..45647b3704 100644 --- a/tutorials/deps/osu_benchmarks.py +++ b/tutorials/deps/osu_benchmarks.py @@ -7,6 +7,7 @@ import reframe as rfm import reframe.utility.sanity as sn +import reframe.utility.udeps as udeps class OSUBenchmarkTestBase(rfm.RunOnlyRegressionTest): @@ -29,7 +30,7 @@ def __init__(self): self.perf_patterns = { 'latency': sn.extractsingle(r'^8\s+(\S+)', self.stdout, 1, float) } - self.depends_on('OSUBuildTest') + self.depends_on('OSUBuildTest', udeps.by_env) self.reference = { '*': {'latency': (0, None, None, 'us')} } @@ -37,8 +38,8 @@ def __init__(self): @rfm.require_deps def set_executable(self, OSUBuildTest): self.executable = os.path.join( - OSUBuildTest().stagedir, - 
'osu-micro-benchmarks-5.6.2', 'mpi', 'pt2pt', 'osu_latency' + OSUBuildTest(part='login').stagedir, + 'mpi', 'pt2pt', 'osu_latency' ) self.executable_opts = ['-x', '100', '-i', '1000'] @@ -52,7 +53,7 @@ def __init__(self): 'bandwidth': sn.extractsingle(r'^4194304\s+(\S+)', self.stdout, 1, float) } - self.depends_on('OSUBuildTest') + self.depends_on('OSUBuildTest', udeps.by_env) self.reference = { '*': {'bandwidth': (0, None, None, 'MB/s')} } @@ -60,8 +61,8 @@ def __init__(self): @rfm.require_deps def set_executable(self, OSUBuildTest): self.executable = os.path.join( - OSUBuildTest().stagedir, - 'osu-micro-benchmarks-5.6.2', 'mpi', 'pt2pt', 'osu_bw' + OSUBuildTest(part='login').stagedir, + 'mpi', 'pt2pt', 'osu_bw' ) self.executable_opts = ['-x', '100', '-i', '1000'] @@ -74,7 +75,7 @@ def __init__(self, num_tasks): self.perf_patterns = { 'latency': sn.extractsingle(r'^8\s+(\S+)', self.stdout, 1, float) } - self.depends_on('OSUBuildTest') + self.depends_on('OSUBuildTest', udeps.by_env) self.reference = { '*': {'latency': (0, None, None, 'us')} } @@ -83,8 +84,8 @@ def __init__(self, num_tasks): @rfm.require_deps def set_executable(self, OSUBuildTest): self.executable = os.path.join( - OSUBuildTest().stagedir, - 'osu-micro-benchmarks-5.6.2', 'mpi', 'collective', 'osu_allreduce' + OSUBuildTest(part='login').stagedir, + 'mpi', 'collective', 'osu_allreduce' ) self.executable_opts = ['-m', '8', '-x', '1000', '-i', '20000'] @@ -93,14 +94,30 @@ def set_executable(self, OSUBuildTest): class OSUBuildTest(rfm.CompileOnlyRegressionTest): def __init__(self): self.descr = 'OSU benchmarks build test' - self.valid_systems = ['daint:gpu'] + self.valid_systems = ['daint:login'] self.valid_prog_environs = ['gnu', 'pgi', 'intel'] - self.sourcesdir = None - self.prebuild_cmds = [ - 'wget http://mvapich.cse.ohio-state.edu/download/mvapich/osu-micro-benchmarks-5.6.2.tar.gz', - 'tar xzf osu-micro-benchmarks-5.6.2.tar.gz', - 'cd osu-micro-benchmarks-5.6.2' - ] + 
self.depends_on('OSUDownloadTest', udeps.fully) self.build_system = 'Autotools' self.build_system.max_concurrency = 8 self.sanity_patterns = sn.assert_not_found('error', self.stderr) + + @rfm.require_deps + def set_sourcedir(self, OSUDownloadTest): + self.sourcesdir = os.path.join( + OSUDownloadTest(environ='gnu').stagedir, + 'osu-micro-benchmarks-5.6.2' + ) + + +@rfm.simple_test +class OSUDownloadTest(rfm.RunOnlyRegressionTest): + def __init__(self): + self.descr = 'OSU benchmarks download sources' + self.valid_systems = ['daint:login'] + self.valid_prog_environs = ['gnu'] + self.executable = 'wget' + self.executable_opts = ['http://mvapich.cse.ohio-state.edu/download/mvapich/osu-micro-benchmarks-5.6.2.tar.gz'] + self.postrun_cmds = [ + 'tar xzf osu-micro-benchmarks-5.6.2.tar.gz' + ] + self.sanity_patterns = sn.assert_not_found('error', self.stderr) diff --git a/unittests/resources/checks_unlisted/deps_simple.py b/unittests/resources/checks_unlisted/deps_simple.py index c0b9c7e8f7..d6e7cdfbe9 100644 --- a/unittests/resources/checks_unlisted/deps_simple.py +++ b/unittests/resources/checks_unlisted/deps_simple.py @@ -5,6 +5,7 @@ import reframe as rfm import reframe.utility.sanity as sn +import reframe.utility.udeps as udeps @rfm.simple_test @@ -17,14 +18,30 @@ def __init__(self): self.sanity_patterns = sn.assert_found(self.name, self.stdout) -@rfm.parameterized_test(*([kind] for kind in ['fully', 'by_env', - 'exact', 'default'])) +@rfm.parameterized_test(*([kind] for kind in ['default', 'fully', + 'by_part', 'by_case', + 'custom', 'any', 'all', + 'nodeps'])) class Test1(rfm.RunOnlyRegressionTest): def __init__(self, kind): + def custom_deps(src, dst): + return ( + src[0] == 'p0' and + src[1] == 'e0' and + dst[0] == 'p1' and + dst[1] == 'e1' + ) + kindspec = { - 'fully': rfm.DEPEND_FULLY, - 'by_env': rfm.DEPEND_BY_ENV, - 'exact': rfm.DEPEND_EXACT, + 'fully': udeps.fully, + 'by_part': udeps.by_part, + 'by_case': udeps.by_case, + 'any': 
udeps.any(udeps.source(udeps.part_is('p0')), + udeps.dest(udeps.env_is('e1'))), + 'all': udeps.all(udeps.part_is('p0'), + udeps.dest(udeps.env_is('e0'))), + 'custom': custom_deps, + 'nodeps': lambda s, d: False, } self.valid_systems = ['sys0:p0', 'sys0:p1'] self.valid_prog_environs = ['e0', 'e1'] @@ -33,8 +50,5 @@ def __init__(self, kind): self.sanity_patterns = sn.assert_found(self.name, self.stdout) if kind == 'default': self.depends_on('Test0') - elif kindspec[kind] == rfm.DEPEND_EXACT: - self.depends_on('Test0', kindspec[kind], - {'e0': ['e0', 'e1'], 'e1': ['e1']}) else: self.depends_on('Test0', kindspec[kind]) diff --git a/unittests/test_dependencies.py b/unittests/test_dependencies.py index 2d0ffacb6e..a7e158bfe1 100644 --- a/unittests/test_dependencies.py +++ b/unittests/test_dependencies.py @@ -8,11 +8,13 @@ import reframe as rfm import reframe.core.runtime as rt -import reframe.frontend.dependency as dependency +import reframe.frontend.dependencies as dependencies import reframe.frontend.executors as executors import reframe.utility as util +import reframe.utility.udeps as udeps from reframe.core.environments import Environment from reframe.core.exceptions import DependencyError +from reframe.core.warnings import ReframeDeprecationWarning from reframe.frontend.loader import RegressionCheckLoader import unittests.fixtures as fixtures @@ -72,9 +74,11 @@ def find_check(name, checks): return None -def find_case(cname, ename, cases): +def find_case(cname, ename, partname, cases): for c in cases: - if c.check.name == cname and c.environ.name == ename: + if (c.check.name == cname and + c.environ.name == ename and + c.partition.name == partname): return c @@ -102,8 +106,8 @@ def loader(): def test_eq_hash(loader, exec_ctx): cases = executors.generate_testcases(loader.load_all()) - case0 = find_case('Test0', 'e0', cases) - case1 = find_case('Test0', 'e1', cases) + case0 = find_case('Test0', 'e0', 'p0', cases) + case1 = find_case('Test0', 'e1', 'p0', cases) 
case0_copy = case0.clone() assert case0 == case0_copy @@ -112,140 +116,413 @@ def test_eq_hash(loader, exec_ctx): assert hash(case1) != hash(case0) +def test_dependecies_how_functions(): + t0_cases = [(p, e) + for p in ['p0', 'p1'] + for e in ['e0', 'e1', 'e2']] + t1_cases = [(p, e) + for p in ['p0', 'p1', 'p2'] + for e in ['e0', 'e1']] + + how = udeps.fully + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + } + assert len(deps) == 36 + + how = udeps.by_part + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if t0[0] == t1[0] + } + assert len(deps) == 12 + + how = udeps.by_xpart + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if t0[0] != t1[0] + } + assert len(deps) == 24 + + how = udeps.by_env + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) for t0 in t0_cases + for t1 in t1_cases + if t0[1] == t1[1] + } + assert len(deps) == 12 + + how = udeps.by_xenv + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) for t0 in t0_cases + for t1 in t1_cases + if t0[1] != t1[1] + } + assert len(deps) == 24 + + how = udeps.by_case + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if (t0[0] == t1[0] and t0[1] == t1[1]) + } + assert len(deps) == 4 + + how = udeps.by_xcase + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if (t0[0] != t1[0] or t0[1] != t1[1]) + } + assert len(deps) == 32 + + +def test_dependecies_how_functions_undoc(): + t0_cases = [(p, e) + for 
p in ['p0', 'p1'] + for e in ['e0', 'e1', 'e2']] + t1_cases = [(p, e) + for p in ['p0', 'p1', 'p2'] + for e in ['e0', 'e1']] + + how = udeps.part_is('p0') + deps = {(t0, t1) for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if (t0[0] == 'p0' and t1[0] == 'p0') + } + assert len(deps) == 6 + + how = udeps.source(udeps.part_is('p0')) + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if t0[0] == 'p0' + } + assert len(deps) == 18 + + how = udeps.dest(udeps.part_is('p0')) + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if t1[0] == 'p0' + } + assert len(deps) == 12 + + how = udeps.env_is('e0') + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if (t0[1] == 'e0' and t1[1] == 'e0') + } + assert len(deps) == 6 + + how = udeps.source(udeps.env_is('e0')) + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if t0[1] == 'e0' + } + assert len(deps) == 12 + + how = udeps.dest(udeps.env_is('e0')) + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if t1[1] == 'e0' + } + assert len(deps) == 18 + + how = udeps.any(udeps.source(udeps.part_is('p0')), + udeps.dest(udeps.env_is('e1'))) + deps = {(t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if (t0[0] == 'p0' or t1[1] == 'e1') + } + assert len(deps) == 27 + + how = udeps.all(udeps.source(udeps.part_is('p0')), + udeps.dest(udeps.env_is('e1'))) + deps = {(t0, t1) 
+ for t0 in t0_cases + for t1 in t1_cases + if how(t0, t1)} + assert deps == { + (t0, t1) + for t0 in t0_cases + for t1 in t1_cases + if (t0[0] == 'p0' and t1[1] == 'e1') + } + assert len(deps) == 9 + + +def test_build_deps_deprecated_syntax(loader, exec_ctx): + class Test0(rfm.RegressionTest): + def __init__(self): + self.valid_systems = ['sys0:p0', 'sys0:p1'] + self.valid_prog_environs = ['e0', 'e1'] + self.executable = 'echo' + self.executable_opts = [self.name] + self.sanity_patterns = sn.assert_found(self.name, self.stdout) + + @rfm.parameterized_test(*([kind] for kind in ['fully', 'by_case', + 'exact'])) + class Test1_deprecated(rfm.RunOnlyRegressionTest): + def __init__(self, kind): + kindspec = { + 'fully': rfm.DEPEND_FULLY, + 'by_case': rfm.DEPEND_BY_ENV, + 'exact': rfm.DEPEND_EXACT, + } + self.valid_systems = ['sys0:p0', 'sys0:p1'] + self.valid_prog_environs = ['e0', 'e1'] + self.executable = 'echo' + self.executable_opts = [self.name] + if kindspec[kind] == rfm.DEPEND_EXACT: + self.depends_on('Test0', kindspec[kind], + {'e0': ['e0', 'e1'], 'e1': ['e1']}) + else: + self.depends_on('Test0', kindspec[kind]) + + with pytest.warns(ReframeDeprecationWarning): + t1 = Test1_deprecated('fully') + assert(t1._userdeps == [('Test0', udeps.by_part)]) + + with pytest.warns(ReframeDeprecationWarning): + t1 = Test1_deprecated('by_case') + assert(t1._userdeps == [('Test0', udeps.by_case)]) + + with pytest.warns(ReframeDeprecationWarning): + t1 = Test1_deprecated('exact') + how = t1._userdeps[0][1] + t0_cases = [(p, e) for p in ['p0', 'p1'] + for e in ['e0', 'e1']] + t1_cases = [(p, e) for p in ['p0', 'p1'] + for e in ['e0', 'e1']] + deps = {(t0, t1) for t0 in t0_cases + for t1 in t1_cases if how(t0, t1)} + assert deps == { + (t0, t1) for t0 in t0_cases + for t1 in t1_cases + if ((t0[0] == t1[0] and t0[1] == 'e0') or + (t0[0] == t1[0] and t0[1] == 'e1' and t1[1] == 'e1')) + } + assert len(deps) == 6 + + def test_build_deps(loader, exec_ctx): checks = loader.load_all() 
cases = executors.generate_testcases(checks) # Test calling getdep() before having built the graph - t = find_check('Test1_exact', checks) + t = find_check('Test1_fully', checks) with pytest.raises(DependencyError): - t.getdep('Test0', 'e0') + t.getdep('Test0', 'e0', 'p0') # Build dependencies and continue testing - deps = dependency.build_deps(cases) - dependency.validate_deps(deps) - - # Check DEPEND_FULLY dependencies - assert num_deps(deps, 'Test1_fully') == 8 + deps = dependencies.build_deps(cases) + dependencies.validate_deps(deps) + + # Check dependencies for fully connected graph + assert num_deps(deps, 'Test1_fully') == 16 + for p0 in ['sys0:p0', 'sys0:p1']: + for p1 in ['sys0:p0', 'sys0:p1']: + for e0 in ['e0', 'e1']: + for e1 in ['e0', 'e1']: + assert has_edge(deps, + Node('Test1_fully', p0, e0), + Node('Test0', p1, e1)) + + # Check dependencies with same partition + assert num_deps(deps, 'Test1_by_part') == 8 for p in ['sys0:p0', 'sys0:p1']: for e0 in ['e0', 'e1']: for e1 in ['e0', 'e1']: assert has_edge(deps, - Node('Test1_fully', p, e0), + Node('Test1_by_part', p, e0), Node('Test0', p, e1)) - # Check DEPEND_BY_ENV - assert num_deps(deps, 'Test1_by_env') == 4 + # Check dependencies with same partition environment + assert num_deps(deps, 'Test1_by_case') == 4 assert num_deps(deps, 'Test1_default') == 4 for p in ['sys0:p0', 'sys0:p1']: for e in ['e0', 'e1']: assert has_edge(deps, - Node('Test1_by_env', p, e), + Node('Test1_by_case', p, e), Node('Test0', p, e)) assert has_edge(deps, Node('Test1_default', p, e), Node('Test0', p, e)) - # Check DEPEND_EXACT - assert num_deps(deps, 'Test1_exact') == 6 - for p in ['sys0:p0', 'sys0:p1']: - assert has_edge(deps, - Node('Test1_exact', p, 'e0'), - Node('Test0', p, 'e0')) - assert has_edge(deps, - Node('Test1_exact', p, 'e0'), - Node('Test0', p, 'e1')) - assert has_edge(deps, - Node('Test1_exact', p, 'e1'), - Node('Test0', p, 'e1')) + assert num_deps(deps, 'Test1_any') == 12 + for p0 in ['sys0:p0', 'sys0:p1']: + 
for p1 in ['sys0:p0', 'sys0:p1']: + for e0 in ['e0', 'e1']: + for e1 in ['e0', 'e1']: + if (p0 == 'sys0:p0' or e1 == 'e1'): + assert has_edge(deps, + Node('Test1_any', p0, e0), + Node('Test0', p1, e1)) + + assert num_deps(deps, 'Test1_all') == 2 + for p0 in ['sys0:p0', 'sys0:p1']: + for p1 in ['sys0:p0', 'sys0:p1']: + for e0 in ['e0', 'e1']: + for e1 in ['e0', 'e1']: + if (p0 == 'sys0:p0' and p1 == 'sys0:p0' and e1 == 'e1'): + assert has_edge(deps, + Node('Test1_any', p0, e0), + Node('Test0', p1, e1)) + + # Check custom dependencies + assert num_deps(deps, 'Test1_custom') == 1 + assert has_edge(deps, + Node('Test1_custom', 'sys0:p0', 'e0'), + Node('Test0', 'sys0:p1', 'e1')) + + # Check dependencies of Test1_nodeps + assert num_deps(deps, 'Test1_nodeps') == 0 # Check in-degree of Test0 - # 2 from Test1_fully, - # 1 from Test1_by_env, - # 1 from Test1_exact, + # 4 from Test1_fully, + # 2 from Test1_by_part, + # 1 from Test1_by_case, + # 2 from Test1_any, + # 2 from Test1_all, + # 0 from Test1_custom, # 1 from Test1_default - assert in_degree(deps, Node('Test0', 'sys0:p0', 'e0')) == 5 - assert in_degree(deps, Node('Test0', 'sys0:p1', 'e0')) == 5 - - # 2 from Test1_fully, - # 1 from Test1_by_env, - # 2 from Test1_exact, + # 0 from Test1_nodeps + assert in_degree(deps, Node('Test0', 'sys0:p0', 'e0')) == 12 + + # 4 from Test1_fully, + # 2 from Test1_by_part, + # 1 from Test1_by_case, + # 2 from Test1_any, + # 0 from Test1_all, + # 0 from Test1_custom, # 1 from Test1_default - assert in_degree(deps, Node('Test0', 'sys0:p0', 'e1')) == 6 - assert in_degree(deps, Node('Test0', 'sys0:p1', 'e1')) == 6 + # 0 from Test1_nodeps + assert in_degree(deps, Node('Test0', 'sys0:p1', 'e0')) == 10 + + # 4 from Test1_fully, + # 2 from Test1_by_part, + # 1 from Test1_by_case, + # 4 from Test1_any, + # 0 from Test1_all, + # 0 from Test1_custom, + # 1 from Test1_default + # 0 from Test1_nodeps + assert in_degree(deps, Node('Test0', 'sys0:p0', 'e1')) == 12 + + # 4 from Test1_fully, + # 2 from 
Test1_by_part, + # 1 from Test1_by_case, + # 4 from Test1_any, + # 0 from Test1_all, + # 1 from Test1_custom, + # 1 from Test1_default + # 0 from Test1_nodeps + assert in_degree(deps, Node('Test0', 'sys0:p1', 'e1')) == 13 # Pick a check to test getdep() - check_e0 = find_case('Test1_exact', 'e0', cases).check - check_e1 = find_case('Test1_exact', 'e1', cases).check + check_e0 = find_case('Test1_by_part', 'e0', 'p0', cases).check + check_e1 = find_case('Test1_by_part', 'e1', 'p0', cases).check with pytest.raises(DependencyError): - check_e0.getdep('Test0') + check_e0.getdep('Test0', 'p0') # Set the current environment check_e0._current_environ = Environment('e0') check_e1._current_environ = Environment('e1') - assert check_e0.getdep('Test0', 'e0').name == 'Test0' - assert check_e0.getdep('Test0', 'e1').name == 'Test0' - assert check_e1.getdep('Test0', 'e1').name == 'Test0' + assert check_e0.getdep('Test0', 'e0', 'p0').name == 'Test0' + assert check_e0.getdep('Test0', 'e1', 'p0').name == 'Test0' + assert check_e1.getdep('Test0', 'e1', 'p0').name == 'Test0' with pytest.raises(DependencyError): - check_e0.getdep('TestX', 'e0') + check_e0.getdep('TestX_deprecated', 'e0', 'p0') with pytest.raises(DependencyError): - check_e0.getdep('Test0', 'eX') + check_e0.getdep('Test0', 'eX', 'p0') with pytest.raises(DependencyError): - check_e1.getdep('Test0', 'e0') - - -def test_build_deps_unknown_test(loader, exec_ctx): - checks = loader.load_all() - - # Add some inexistent dependencies - test0 = find_check('Test0', checks) - for depkind in ('default', 'fully', 'by_env', 'exact'): - test1 = find_check('Test1_' + depkind, checks) - if depkind == 'default': - test1.depends_on('TestX') - elif depkind == 'exact': - test1.depends_on('TestX', rfm.DEPEND_EXACT, {'e0': ['e0']}) - elif depkind == 'fully': - test1.depends_on('TestX', rfm.DEPEND_FULLY) - elif depkind == 'by_env': - test1.depends_on('TestX', rfm.DEPEND_BY_ENV) - - with pytest.raises(DependencyError): - 
dependency.build_deps(executors.generate_testcases(checks)) - - -def test_build_deps_unknown_target_env(loader, exec_ctx): - checks = loader.load_all() - - # Add some inexistent dependencies - test0 = find_check('Test0', checks) - test1 = find_check('Test1_default', checks) - test1.depends_on('Test0', rfm.DEPEND_EXACT, {'e0': ['eX']}) - with pytest.raises(DependencyError): - dependency.build_deps(executors.generate_testcases(checks)) - - -def test_build_deps_unknown_source_env(loader, exec_ctx): - checks = loader.load_all() - - # Add some inexistent dependencies - test0 = find_check('Test0', checks) - test1 = find_check('Test1_default', checks) - test1.depends_on('Test0', rfm.DEPEND_EXACT, {'eX': ['e0']}) - - # Unknown source is ignored, because it might simply be that the test - # is not executed for eX - deps = dependency.build_deps(executors.generate_testcases(checks)) - assert num_deps(deps, 'Test1_default') == 4 + check_e1.getdep('Test0', 'e0', 'p1') def test_build_deps_empty(exec_ctx): - assert {} == dependency.build_deps([]) + assert {} == dependencies.build_deps([]) @pytest.fixture @@ -294,8 +571,8 @@ def test_valid_deps(make_test, exec_ctx): t6.depends_on('t5') t7.depends_on('t5') t8.depends_on('t7') - dependency.validate_deps( - dependency.build_deps( + dependencies.validate_deps( + dependencies.build_deps( executors.generate_testcases([t0, t1, t2, t3, t4, t5, t6, t7, t8]) ) @@ -332,13 +609,13 @@ def test_cyclic_deps(make_test, exec_ctx): t6.depends_on('t5') t7.depends_on('t5') t8.depends_on('t7') - deps = dependency.build_deps( + deps = dependencies.build_deps( executors.generate_testcases([t0, t1, t2, t3, t4, t5, t6, t7, t8]) ) with pytest.raises(DependencyError) as exc_info: - dependency.validate_deps(deps) + dependencies.validate_deps(deps) assert ('t4->t2->t1->t4' in str(exc_info.value) or 't2->t1->t4->t2' in str(exc_info.value) or @@ -351,20 +628,20 @@ def test_cyclic_deps(make_test, exec_ctx): def test_cyclic_deps_by_env(make_test, exec_ctx): t0 = 
make_test('t0') t1 = make_test('t1') - t1.depends_on('t0', rfm.DEPEND_EXACT, {'e0': ['e0']}) - t0.depends_on('t1', rfm.DEPEND_EXACT, {'e1': ['e1']}) - deps = dependency.build_deps( + t1.depends_on('t0', udeps.env_is('e0')) + t0.depends_on('t1', udeps.env_is('e1')) + deps = dependencies.build_deps( executors.generate_testcases([t0, t1]) ) with pytest.raises(DependencyError) as exc_info: - dependency.validate_deps(deps) + dependencies.validate_deps(deps) assert ('t1->t0->t1' in str(exc_info.value) or 't0->t1->t0' in str(exc_info.value)) def test_validate_deps_empty(exec_ctx): - dependency.validate_deps({}) + dependencies.validate_deps({}) def assert_topological_order(cases, graph): @@ -431,11 +708,11 @@ def test_toposort(make_test, exec_ctx): t6.depends_on('t5') t7.depends_on('t5') t8.depends_on('t7') - deps = dependency.build_deps( + deps = dependencies.build_deps( executors.generate_testcases([t0, t1, t2, t3, t4, t5, t6, t7, t8]) ) - cases = dependency.toposort(deps) + cases = dependencies.toposort(deps) assert_topological_order(cases, deps) @@ -462,11 +739,11 @@ def test_toposort_subgraph(make_test, exec_ctx): t3.depends_on('t2') t4.depends_on('t2') t4.depends_on('t3') - full_deps = dependency.build_deps( + full_deps = dependencies.build_deps( executors.generate_testcases([t0, t1, t2, t3, t4]) ) - partial_deps = dependency.build_deps( + partial_deps = dependencies.build_deps( executors.generate_testcases([t3, t4]), full_deps ) - cases = dependency.toposort(partial_deps, is_subgraph=True) + cases = dependencies.toposort(partial_deps, is_subgraph=True) assert_topological_order(cases, partial_deps) diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index 22cbc887d7..ac9be046cb 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -646,7 +646,7 @@ def x(self): def test_require_deps(local_exec_ctx): - import reframe.frontend.dependency as dependency + import reframe.frontend.dependencies as dependencies import 
reframe.frontend.executors as executors @fixtures.custom_prefix('unittests/resources/checks') @@ -675,8 +675,8 @@ def setz(self, T0): self.z = T0().x + 2 cases = executors.generate_testcases([T0(), T1()]) - deps = dependency.build_deps(cases) - for c in dependency.toposort(deps): + deps = dependencies.build_deps(cases) + for c in dependencies.toposort(deps): _run(*c) for c in cases: diff --git a/unittests/test_policies.py b/unittests/test_policies.py index 2f0d78a737..7168c5caf8 100644 --- a/unittests/test_policies.py +++ b/unittests/test_policies.py @@ -14,7 +14,7 @@ import reframe import reframe.core.runtime as rt -import reframe.frontend.dependency as dependency +import reframe.frontend.dependencies as dependencies import reframe.frontend.executors as executors import reframe.frontend.executors.policies as policies import reframe.utility as util @@ -91,9 +91,9 @@ def _make_cases(checks=None, sort=False, *args, **kwargs): cases = executors.generate_testcases(checks, *args, **kwargs) if sort: - depgraph = dependency.build_deps(cases) - dependency.validate_deps(depgraph) - cases = dependency.toposort(depgraph) + depgraph = dependencies.build_deps(cases) + dependencies.validate_deps(depgraph) + cases = dependencies.toposort(depgraph) return cases