From 8ad36d7f5a60695c42b30cb80070d6c6f14a304a Mon Sep 17 00:00:00 2001 From: lisawim Date: Wed, 29 May 2024 21:40:06 +0200 Subject: [PATCH 1/9] Added FI-SDC-DAE-MPI sweeper --- .../DAE/sweepers/fully_implicit_DAE_MPI.py | 213 ++++++++++++++++++ 1 file changed, 213 insertions(+) create mode 100644 pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py diff --git a/pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py b/pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py new file mode 100644 index 0000000000..62c1cc21a4 --- /dev/null +++ b/pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py @@ -0,0 +1,213 @@ +from mpi4py import MPI + +from pySDC.core.Errors import ParameterError +from pySDC.implementations.sweeper_classes.generic_implicit_MPI import SweeperMPI, generic_implicit_MPI +from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE + + +class SweeperDAEMPI(SweeperMPI): + r""" + MPI base class for DAE sweepers where each rank administers one collocation node. + + This class provides all the stuff to be done in parallel during the simulation. When implementing a new MPI-DAE sweeper multiple inheritance is used here + + >>> class fully_implicit_DAE_MPI(SweeperDAEMPI, generic_implicit_MPI, fully_implicit_DAE): + + Be careful with ordering of the class where the child class should inherit from! Due to multple inheritance several methods are overwritten here. In the example above (which is the class below) the class first inherits the methods + + - compute_residual() + - predict() + - compute_end_point() + + from ``SweeperDAEMPI``. From second inherited class ``generic_implicit_MPI`` the methods + + - integrate() + - update_nodes() + + are inherited, which then overwritten by the child class. + + Parameters + ---------- + params : dict + Parameters passed to the sweeper. + """ + + def __init__(self, params): + super().__init__(params) + + def compute_residual(self, stage=None): + r""" + Uses the absolute value of the DAE system + + .. math:: + ||F(t, u, u')|| + + for computing the residual in a chosen norm. If norm computes residual by using all values on collocation nodes, result is broadcasted to all processes; when norm only uses the value on last node, the result is collected on last process! + + Parameters + ---------- + stage : str, optional + The current stage of the step the level belongs to. + """ + + L = self.level + P = L.prob + + # Check if we want to skip the residual computation to gain performance + # Keep in mind that skipping any residual computation is likely to give incorrect outputs of the residual! 
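+        # The check below skips the residual computation for the stages listed in
+        # params.skip_residual_computation. Otherwise, each rank evaluates the DAE residual
+        # F(t_m, u_m, u'_m) only at its own collocation node and takes its norm; for the
+        # 'full_*' residual types the maximum over all nodes is then formed via an allreduce,
+        # while for the 'last_*' types the value at the last node is broadcast from the last
+        # rank, so every rank ends up holding the same residual value.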
+ if stage in self.params.skip_residual_computation: + L.status.residual = 0.0 if L.status.residual is None else L.status.residual + return None + + res = P.eval_f(L.u[self.rank + 1], L.f[self.rank + 1], L.time + L.dt * self.coll.nodes[self.rank]) + res_norm = abs(res) # use abs function from data type here + + # find maximal residual over the nodes + if L.params.residual_type == 'full_abs': + L.status.residual = self.comm.allreduce(res_norm, op=MPI.MAX) + elif L.params.residual_type == 'last_abs': + L.status.residual = self.comm.bcast(res_norm, root=self.comm.size - 1) + elif L.params.residual_type == 'full_rel': + L.status.residual = self.comm.allreduce(res_norm / abs(L.u[0]), op=MPI.MAX) + elif L.params.residual_type == 'last_rel': + L.status.residual = self.comm.bcast(res_norm / abs(L.u[0]), root=self.comm.size - 1) + else: + raise NotImplementedError(f'residual type \"{L.params.residual_type}\" not implemented!') + + # indicate that the residual has seen the new values + L.status.updated = False + + return None + + def predict(self): + r""" + Predictor to fill values at nodes before first sweep. It can decide whether the + + - initial condition is spread to each node ('initial_guess' = 'spread'), + - or zero values are spread to each node ('initial_guess' = 'zero'). + + Default prediction for sweepers, only copies values to all collocation nodes. This function + overrides the base implementation by always initialising ``level.f`` to zero. This is necessary since + ``level.f`` stores the solution derivative in the fully implicit case, which is not initially known. + """ + + L = self.level + P = L.prob + + # set initial guess for gradient to zero + L.f[0] = P.dtype_f(init=P.init, val=0.0) + + if self.params.initial_guess == 'spread': + L.u[self.rank + 1] = P.dtype_u(L.u[0]) + L.f[self.rank + 1] = P.dtype_f(init=P.init, val=0.0) + elif self.params.initial_guess == 'zero': + L.u[self.rank + 1] = P.dtype_u(init=P.init, val=0.0) + L.f[self.rank + 1] = P.dtype_f(init=P.init, val=0.0) + else: + raise ParameterError(f'initial_guess option {self.params.initial_guess} not implemented') + + # indicate that this level is now ready for sweeps + L.status.unlocked = True + L.status.updated = True + + def compute_end_point(self): + """ + Compute u at the right point of the interval. + + The value uend computed here is a full evaluation of the Picard formulation unless do_full_update==False + + Returns + ------- + None + """ + + L = self.level + P = L.prob + L.uend = P.dtype_u(P.init, val=0.0) + + # check if Mth node is equal to right point and do_coll_update is false, perform a simple copy + if self.coll.right_is_node and not self.params.do_coll_update: + # a copy is sufficient + root = self.comm.Get_size() - 1 + if self.comm.rank == root: + L.uend = P.dtype_u(L.u[-1]) + # broadcast diff parts and alg parts separately + self.comm.Bcast(L.uend.diff, root=root) + self.comm.Bcast(L.uend.alg, root=root) + else: + raise NotImplementedError() + + return None + + +class fully_implicit_DAE_MPI(SweeperDAEMPI, generic_implicit_MPI, fully_implicit_DAE): + """ + Fully implicit DAE sweeper parallelized across the nodes. + Please supply a communicator as `comm` to the parameters! + """ + + def integrate(self, last_only=False): + r""" + Integrates the gradient. ``me`` serves as buffer, and root process ``m`` stores + the result of integral at node :math:`\tau_m` there. + + Parameters + ---------- + last_only : bool, optional + Integrate only the last node for the residual or all of them. 
+ + Returns + ------- + me : list of dtype_u + Containing the integral as values. + """ + + L = self.level + P = L.prob + + me = P.dtype_u(P.init, val=0.0) + for m in [self.coll.num_nodes - 1] if last_only else range(self.coll.num_nodes): + recvBufDiff = me.diff[:] if m == self.rank else None + recvBufAlg = me.alg[:] if m == self.rank else None + integral = L.dt * self.coll.Qmat[m + 1, self.rank + 1] * L.f[self.rank + 1] + self.comm.Reduce( + integral.diff, recvBufDiff, root=m, op=MPI.SUM + ) + self.comm.Reduce( + integral.alg, recvBufAlg, root=m, op=MPI.SUM + ) + + return me + + def update_nodes(self): + r""" + Updates values of ``u`` and ``f`` at collocation nodes. This correspond to a single iteration of the + preconditioned Richardson iteration in **"ordinary"** SDC. + """ + + L = self.level + P = L.prob + + # only if the level has been touched before + assert L.status.unlocked + + integral = self.integrate() + integral -= L.dt * self.QI[self.rank + 1, self.rank + 1] * L.f[self.rank + 1] + integral += L.u[0] + + u_approx = P.dtype_u(integral) + + L.f[self.rank + 1][:] = P.solve_system( + fully_implicit_DAE.F, + u_approx, + L.dt * self.QI[self.rank + 1, self.rank + 1], + L.f[self.rank + 1], + L.time + L.dt * self.coll.nodes[self.rank], + ) + + integral = self.integrate() + L.u[self.rank + 1] = L.u[0] + integral + # indicate presence of new values at this level + L.status.updated = True + + return None From f546650f9128b69a79ce7ccc3d2b2f33cc0eb029 Mon Sep 17 00:00:00 2001 From: lisawim Date: Thu, 30 May 2024 06:36:46 +0200 Subject: [PATCH 2/9] Changes in docu + SI-SDC-DAE-MPI sweeper with playground --- pySDC/playgrounds/DAE/playground_MPI.py | 87 +++++++++++++++ .../DAE/sweepers/SemiImplicitDAEMPI.py | 100 ++++++++++++++++++ .../DAE/sweepers/fully_implicit_DAE.py | 4 +- .../DAE/sweepers/fully_implicit_DAE_MPI.py | 42 +++++--- 4 files changed, 218 insertions(+), 15 deletions(-) create mode 100644 pySDC/playgrounds/DAE/playground_MPI.py create mode 100644 pySDC/projects/DAE/sweepers/SemiImplicitDAEMPI.py diff --git a/pySDC/playgrounds/DAE/playground_MPI.py b/pySDC/playgrounds/DAE/playground_MPI.py new file mode 100644 index 0000000000..3f9d212f32 --- /dev/null +++ b/pySDC/playgrounds/DAE/playground_MPI.py @@ -0,0 +1,87 @@ +from mpi4py import MPI +from pathlib import Path + +from pySDC.projects.DAE.sweepers.SemiImplicitDAEMPI import SemiImplicitDAEMPI +from pySDC.projects.DAE.problems.DiscontinuousTestDAE import DiscontinuousTestDAE +from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI + +from pySDC.playgrounds.DAE.DiscontinuousTestDAE import plotSolution + +from pySDC.implementations.hooks.log_solution import LogSolution + + +def run(): + r""" + Routine to do a run using the semi-implicit SDC-DAE sweeper enabling parallelization across the nodes. To run the script use the command + + >>> mpiexec -n 3 python3 playground_MPI.py + """ + + # This communicator is needed for the SDC-DAE sweeper + comm = MPI.COMM_WORLD + + # initialize level parameters + level_params = { + 'restol': 1e-12, + 'dt': 0.1, + } + + # initialize problem parameters + problem_params = { + 'newton_tol': 1e-6, + } + + # initialize sweeper parameters + sweeper_params = { + 'quad_type': 'RADAU-RIGHT', + 'num_nodes': 3, + 'QI': 'MIN-SR-S', # use a diagonal Q_Delta here! 
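+        # MIN-SR-S yields a diagonal Q_Delta, so the implicit solves at the collocation nodes
+        # decouple and each MPI rank handles exactly one node. The MPI sweepers only ever use
+        # the diagonal entries of Q_Delta, so a non-diagonal preconditioner would effectively
+        # lose its off-diagonal couplings here.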
+ 'initial_guess': 'spread', + 'comm': comm, + } + + # check if number of processes requested matches with number of nodes + assert sweeper_params['num_nodes'] == comm.Get_size(), f"Number of nodes does not match with number of processes!" + + # initialize step parameters + step_params = { + 'maxiter': 20, + } + + # initialize controller parameters + controller_params = { + 'logger_level': 30, + 'hook_class': [LogSolution], + } + + # fill description dictionary for easy step instantiation + description = { + 'problem_class': DiscontinuousTestDAE, + 'problem_params': problem_params, + 'sweeper_class': SemiImplicitDAEMPI, + 'sweeper_params': sweeper_params, + 'level_params': level_params, + 'step_params': step_params, + } + + # instantiate controller + controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description) + P = controller.MS[0].levels[0].prob + + t0 = 1.0 + Tend = 3.0 + + uinit = P.u_exact(t0) + + # call main function to get things done... + uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + + Path("data").mkdir(parents=True, exist_ok=True) + + # only process with index 0 should plot + if comm.Get_rank() == 0: + plotSolution(stats) + + +if __name__ == "__main__": + run() diff --git a/pySDC/projects/DAE/sweepers/SemiImplicitDAEMPI.py b/pySDC/projects/DAE/sweepers/SemiImplicitDAEMPI.py new file mode 100644 index 0000000000..c54cb12911 --- /dev/null +++ b/pySDC/projects/DAE/sweepers/SemiImplicitDAEMPI.py @@ -0,0 +1,100 @@ +from mpi4py import MPI + +from pySDC.projects.DAE.sweepers.fully_implicit_DAE_MPI import SweeperDAEMPI +from pySDC.projects.DAE.sweepers.SemiImplicitDAE import SemiImplicitDAE + + +class SemiImplicitDAEMPI(SweeperDAEMPI, SemiImplicitDAE): + r""" + Custom sweeper class to implement the fully-implicit SDC parallelized across the nodes for solving fully-implicit DAE problems of the form + + .. math:: + u' = f(u, z, t), + + .. math:: + 0 = g(u, z, t) + + More detailed description can be found in ``SemiImplicitDAE.py``. To parallelize SDC across the method the idea is to use a diagonal :math:`\mathbf{Q}_\Delta` to have solves of the implicit system on each node that can be done in parallel since they are fully decoupled from values of previous nodes. First such diagonal :math:`\mathbf{Q}_\Delta` were developed in [1]_. Years later intensive theory about the topic was developed in [2]_. For the DAE case these ideas were basically transferred. + + Note + ---- + For more details of implementing a sweeper enabling parallelization across the method we refer to documentation in [generic_implicit_MPI.py](https://github.com/Parallel-in-Time/pySDC/blob/master/pySDC/implementations/sweeper_classes/generic_implicit_MPI.py) and [fully_implicit_DAE_MPI.py](https://github.com/Parallel-in-Time/pySDC/blob/master/pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py). + + For parallelization across the method for semi-explicit DAEs, differential and algebraic parts have to be treated separately. In ``integrate()`` these parts needs to be collected separately, otherwise information would get lost. + + Reference + --------- + .. [1] R. Speck. Parallelizing spectral deferred corrections across the method. Comput. Vis. Sci. 19, No. 3-4, 75-83 (2018). + .. [2] G. Čaklović, T. Lunet, S. Götschel, D. Ruprecht. Improving Efficiency of Parallel Across the Method Spectral Deferred Corrections. Preprint, arXiv:2403.18641 [math.NA] (2024). + """ + + def integrate(self, last_only=False): + """ + Integrates the gradient. 
Note that here only the differential part is integrated, i.e., the + integral over the algebraic part is zero. ``me`` serves as buffer, and root process ``m`` + stores the result of integral at node :math:`\tau_m` there. + + Parameters + ---------- + last_only : bool, optional + Integrate only the last node for the residual or all of them. + + Returns + ------- + me : list of dtype_u + Containing the integral as values. + """ + + L = self.level + P = L.prob + + me = P.dtype_u(P.init, val=0.0) + for m in [self.coll.num_nodes - 1] if last_only else range(self.coll.num_nodes): + recvBufDiff = me.diff[:] if m == self.rank else None + recvBufAlg = me.alg[:] if m == self.rank else None + integral = P.dtype_u(P.init, val=0.0) + integral.diff[:] = L.dt * self.coll.Qmat[m + 1, self.rank + 1] * L.f[self.rank + 1].diff[:] + self.comm.Reduce(integral.diff[:], recvBufDiff, root=m, op=MPI.SUM) + self.comm.Reduce(integral.alg[:], recvBufAlg, root=m, op=MPI.SUM) + + return me + + def update_nodes(self): + r""" + Updates values of ``u`` and ``f`` at collocation nodes. This correspond to a single iteration of the + preconditioned Richardson iteration in **"ordinary"** SDC. + """ + + L = self.level + P = L.prob + + # only if the level has been touched before + assert L.status.unlocked + + integral = self.integrate() + integral.diff[:] -= L.dt * self.QI[self.rank + 1, self.rank + 1] * L.f[self.rank + 1].diff[:] + integral.diff[:] += L.u[0].diff[:] + + u_approx = P.dtype_u(integral) + + u0 = P.dtype_u(P.init) + u0.diff[:], u0.alg[:] = L.f[self.rank + 1].diff[:], L.u[self.rank + 1].alg[:] + + u_new = P.solve_system( + SemiImplicitDAE.F, + u_approx, + L.dt * self.QI[self.rank + 1, self.rank + 1], + u0, + L.time + L.dt * self.coll.nodes[self.rank], + ) + + L.f[self.rank + 1].diff[:] = u_new.diff[:] + L.u[self.rank + 1].alg[:] = u_new.alg[:] + + integral = self.integrate() + L.u[self.rank + 1].diff[:] = L.u[0].diff[:] + integral.diff[:] + + # indicate presence of new values at this level + L.status.updated = True + + return None diff --git a/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py b/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py index 4cb8383685..fa6cfb944e 100644 --- a/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py +++ b/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py @@ -148,7 +148,7 @@ def compute_residual(self, stage=None): .. math:: ||F(t, u, u')|| - for computing the residual. + for computing the residual in a chosen norm. Parameters ---------- @@ -194,7 +194,7 @@ def compute_residual(self, stage=None): def compute_end_point(self): """ - Compute u at the right point of the interval + Compute u at the right point of the interval. The value uend computed here is a full evaluation of the Picard formulation unless do_full_update==False diff --git a/pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py b/pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py index 62c1cc21a4..1e01bea397 100644 --- a/pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py +++ b/pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py @@ -13,7 +13,7 @@ class SweeperDAEMPI(SweeperMPI): >>> class fully_implicit_DAE_MPI(SweeperDAEMPI, generic_implicit_MPI, fully_implicit_DAE): - Be careful with ordering of the class where the child class should inherit from! Due to multple inheritance several methods are overwritten here. In the example above (which is the class below) the class first inherits the methods + Be careful with ordering of classes where the child class should inherit from! 
Due to multple inheritance several methods are overwritten here. In the example above (which is the class below) the class first inherits the methods - compute_residual() - predict() @@ -141,9 +141,31 @@ def compute_end_point(self): class fully_implicit_DAE_MPI(SweeperDAEMPI, generic_implicit_MPI, fully_implicit_DAE): - """ - Fully implicit DAE sweeper parallelized across the nodes. - Please supply a communicator as `comm` to the parameters! + r""" + Custom sweeper class to implement the fully-implicit SDC parallelized across the nodes for solving fully-implicit DAE problems of the form + + .. math:: + F(t, u, u') = 0. + + More detailed description can be found in ``fully_implicit_DAE.py``. + + This sweeper uses diagonal matrices :math:`\mathbf{Q}_\Delta` to + solve the implicit system on each node in parallel. First diagonal matrices were first developed and applied in [1]_. Years later intensive theory in [2]_ is developed to that topic. Ideas of parallelizing SDC across the method are basically applied for the DAE case here. + + Note + ---- + Please supply a communicator as `comm` to the parameters! The number of processes used to parallelize needs to be equal to the number of collocation nodes used. For example, three collocation nodes are used and passed to the sweeper via + + >>> sweeper_params = {'num_nodes': 3} + + running a script ``example_mpi.py`` using ``mpi4py`` that is used to enable parallelism have to be done by using the command + + >>> mpiexec -n 3 python3 example.py + + Reference + --------- + .. [1] R. Speck. Parallelizing spectral deferred corrections across the method. Comput. Vis. Sci. 19, No. 3-4, 75-83 (2018). + .. [2] G. Čaklović, T. Lunet, S. Götschel, D. Ruprecht. Improving Efficiency of Parallel Across the Method Spectral Deferred Corrections. Preprint, arXiv:2403.18641 [math.NA] (2024). """ def integrate(self, last_only=False): @@ -167,15 +189,9 @@ def integrate(self, last_only=False): me = P.dtype_u(P.init, val=0.0) for m in [self.coll.num_nodes - 1] if last_only else range(self.coll.num_nodes): - recvBufDiff = me.diff[:] if m == self.rank else None - recvBufAlg = me.alg[:] if m == self.rank else None - integral = L.dt * self.coll.Qmat[m + 1, self.rank + 1] * L.f[self.rank + 1] - self.comm.Reduce( - integral.diff, recvBufDiff, root=m, op=MPI.SUM - ) - self.comm.Reduce( - integral.alg, recvBufAlg, root=m, op=MPI.SUM - ) + integral = L.dt * self.coll.Qmat[m + 1, self.rank + 1] * L.f[self.rank + 1][:] + recvBuf = me[:] if m == self.rank else None + self.comm.Reduce(integral, recvBuf, root=m, op=MPI.SUM) return me From f94f61bea7147bfaf8326ecc136b7c599cc6dbbb Mon Sep 17 00:00:00 2001 From: lisawim Date: Thu, 30 May 2024 18:41:31 +0200 Subject: [PATCH 3/9] Small changes for playgrounds --- pySDC/playgrounds/DAE/DiscontinuousTestDAE.py | 4 ++-- pySDC/playgrounds/DAE/playground_MPI.py | 9 ++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/pySDC/playgrounds/DAE/DiscontinuousTestDAE.py b/pySDC/playgrounds/DAE/DiscontinuousTestDAE.py index f0bfb7ae4e..7527219d4c 100644 --- a/pySDC/playgrounds/DAE/DiscontinuousTestDAE.py +++ b/pySDC/playgrounds/DAE/DiscontinuousTestDAE.py @@ -74,7 +74,7 @@ def simulateDAE(): plotSolution(stats) -def plotSolution(stats): +def plotSolution(stats, file_name='data/solution.png'): r""" Here, the solution of the DAE is plotted. 
@@ -96,7 +96,7 @@ def plotSolution(stats): plt.xlabel(r"Time $t$") plt.ylabel(r"Solution $y(t)$, $z(t)$") - plt.savefig('data/solution.png', dpi=300, bbox_inches='tight') + plt.savefig(file_name, dpi=300, bbox_inches='tight') plt.close() diff --git a/pySDC/playgrounds/DAE/playground_MPI.py b/pySDC/playgrounds/DAE/playground_MPI.py index 3f9d212f32..9e0503a131 100644 --- a/pySDC/playgrounds/DAE/playground_MPI.py +++ b/pySDC/playgrounds/DAE/playground_MPI.py @@ -12,9 +12,11 @@ def run(): r""" - Routine to do a run using the semi-implicit SDC-DAE sweeper enabling parallelization across the nodes. To run the script use the command + Routine to do a run using the semi-implicit SDC-DAE sweeper enabling parallelization across the nodes. The number of processes that is used to run this file is the number of collocation nodes used! When you run the script with the command >>> mpiexec -n 3 python3 playground_MPI.py + + then 3 collocation nodes are used for SDC. """ # This communicator is needed for the SDC-DAE sweeper @@ -34,7 +36,7 @@ def run(): # initialize sweeper parameters sweeper_params = { 'quad_type': 'RADAU-RIGHT', - 'num_nodes': 3, + 'num_nodes': comm.Get_size(), 'QI': 'MIN-SR-S', # use a diagonal Q_Delta here! 'initial_guess': 'spread', 'comm': comm, @@ -80,7 +82,8 @@ def run(): # only process with index 0 should plot if comm.Get_rank() == 0: - plotSolution(stats) + file_name = 'data/solution_MPI.png' + plotSolution(stats, file_name) if __name__ == "__main__": From 2abe2022a7cbc4d14c20de063e40014755f5f7b6 Mon Sep 17 00:00:00 2001 From: lisawim Date: Thu, 30 May 2024 18:42:15 +0200 Subject: [PATCH 4/9] Test for MPI sweepers + adding MPI stuff to environment.yml --- pySDC/projects/DAE/environment.yml | 2 + pySDC/projects/DAE/run/accuracy_check_MPI.py | 156 ++++++++++++++++++ pySDC/projects/DAE/tests/test_MPI_sweepers.py | 31 ++++ 3 files changed, 189 insertions(+) create mode 100644 pySDC/projects/DAE/run/accuracy_check_MPI.py create mode 100644 pySDC/projects/DAE/tests/test_MPI_sweepers.py diff --git a/pySDC/projects/DAE/environment.yml b/pySDC/projects/DAE/environment.yml index e5da507653..f492334a90 100644 --- a/pySDC/projects/DAE/environment.yml +++ b/pySDC/projects/DAE/environment.yml @@ -8,3 +8,5 @@ dependencies: - numpy - scipy>=0.17.1 - dill>=0.2.6 + - mpich + - mpi4py>=3.0.0 diff --git a/pySDC/projects/DAE/run/accuracy_check_MPI.py b/pySDC/projects/DAE/run/accuracy_check_MPI.py new file mode 100644 index 0000000000..59dc027dd1 --- /dev/null +++ b/pySDC/projects/DAE/run/accuracy_check_MPI.py @@ -0,0 +1,156 @@ +import numpy as np +from mpi4py import MPI + +from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI +from pySDC.projects.DAE.misc.HookClass_DAE import ( + LogGlobalErrorPostStepDifferentialVariable, + LogGlobalErrorPostStepAlgebraicVariable, +) +from pySDC.helpers.stats_helper import get_sorted + + +def run(comm, dt, semi_implicit, index_case): + r""" + Prepares the controller with all the description needed. Here, the function decides to choose the correct sweeper + for the test. + + Parameters + ---------- + comm : mpi4py.MPI.COMM_World + Communicator + dt : float + Time step size chosen for simulation. + semi_implicit : bool + Modules are loaded either for fully-implicit case or semi-implicit case. + index_case : int + Denotes the index case of a DAE to be tested here, can be either ``1`` or ``2``. 
+ """ + + if semi_implicit: + from pySDC.projects.DAE.sweepers.SemiImplicitDAEMPI import SemiImplicitDAEMPI as sweeper + + else: + from pySDC.projects.DAE.sweepers.fully_implicit_DAE_MPI import fully_implicit_DAE_MPI as sweeper + + if index_case == 1: + from pySDC.projects.DAE.problems.DiscontinuousTestDAE import DiscontinuousTestDAE as problem + + t0 = 1.0 + Tend = 1.5 + + elif index_case == 2: + from pySDC.projects.DAE.problems.simple_DAE import simple_dae_1 as problem + + t0 = 0.0 + Tend = 0.4 + + else: + raise NotImplementedError(f"DAE case of index {index_case} is not implemented!") + + # initialize level parameters + level_params = { + 'restol': 1e-12, + 'dt': dt, + } + + # initialize problem parameters + problem_params = { + 'newton_tol': 1e-6, + } + + # initialize sweeper parameters + comm = MPI.COMM_WORLD + sweeper_params = { + 'quad_type': 'RADAU-RIGHT', + 'num_nodes': comm.Get_size(), + 'QI': 'MIN-SR-S', # use a diagonal Q_Delta here! + 'initial_guess': 'spread', + 'comm': comm, + } + + # check if number of processes requested matches with number of nodes + assert ( + sweeper_params['num_nodes'] == comm.Get_size() + ), f"Number of nodes does not match with number of processes! Expected {sweeper_params['num_nodes']}, got {comm.Get_size()}!" + + # initialize step parameters + step_params = { + 'maxiter': 20, + } + + # initialize controller parameters + controller_params = { + 'logger_level': 30, + 'hook_class': [LogGlobalErrorPostStepDifferentialVariable, LogGlobalErrorPostStepAlgebraicVariable], + } + + # fill description dictionary for easy step instantiation + description = { + 'problem_class': problem, + 'problem_params': problem_params, + 'sweeper_class': sweeper, + 'sweeper_params': sweeper_params, + 'level_params': level_params, + 'step_params': step_params, + } + + # instantiate controller + controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description) + P = controller.MS[0].levels[0].prob + + uinit = P.u_exact(t0) + + # call main function to get things done... 
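+    # The controller advances the solution serially in time (num_procs=1); the only
+    # parallelism in this setup is across the collocation nodes, handled inside the sweeper
+    # via the communicator passed through the sweeper parameters.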
+ _, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + + return stats + + +def check_order(comm): + for semi_implicit in [False, True]: + for index_case in [1, 2]: + dt_list = np.logspace(-1.7, -1.0, num=5) + + errorsDiff, errorsAlg = np.zeros(len(dt_list)), np.zeros(len(dt_list)) + for i, dt in enumerate(dt_list): + stats = run(comm, dt, semi_implicit, index_case) + + errorsDiff[i] = max( + np.array( + get_sorted(stats, type='e_global_differential_post_step', sortby='time', recomputed=False) + )[:, 1] + ) + errorsAlg[i] = max( + np.array(get_sorted(stats, type='e_global_algebraic_post_step', sortby='time', recomputed=False))[ + :, 1 + ] + ) + + # only process with index 0 should plot + if comm.Get_rank() == 0: + orderDiff = np.mean( + [ + np.log(errorsDiff[i] / errorsDiff[i - 1]) / np.log(dt_list[i] / dt_list[i - 1]) + for i in range(1, len(dt_list)) + ] + ) + orderAlg = np.mean( + [ + np.log(errorsAlg[i] / errorsAlg[i - 1]) / np.log(dt_list[i] / dt_list[i - 1]) + for i in range(1, len(dt_list)) + ] + ) + + refOrderDiff = 2 * comm.Get_size() - 1 + refOrderAlg = 2 * comm.Get_size() - 1 if index_case == 1 else comm.Get_size() + assert np.isclose( + orderDiff, refOrderDiff, atol=1e0 + ), f"Expected order {refOrderDiff} in differential variable, got {orderDiff}" + assert np.isclose( + orderAlg, refOrderAlg, atol=1e0 + ), f"Expected order {refOrderAlg} in algebraic variable, got {orderAlg}" + + +if __name__ == "__main__": + comm = MPI.COMM_WORLD + check_order(comm) diff --git a/pySDC/projects/DAE/tests/test_MPI_sweepers.py b/pySDC/projects/DAE/tests/test_MPI_sweepers.py new file mode 100644 index 0000000000..f298ec5fa9 --- /dev/null +++ b/pySDC/projects/DAE/tests/test_MPI_sweepers.py @@ -0,0 +1,31 @@ +import pytest + + +@pytest.mark.mpi4py +@pytest.mark.parametrize('num_procs', [2, 3]) +def testOrder(num_procs): + r""" + Test checks if order of accuracy is reached for the MPI sweepers. + """ + + import os + import subprocess + + # Set python path once + my_env = os.environ.copy() + my_env['PYTHONPATH'] = '../../..:.' + my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml' + cwd = '.' + print(my_env['PYTHONPATH']) + cmd = ('mpirun -np ' + str(num_procs) + ' python pySDC/projects/DAE/run/accuracy_check_MPI.py').split() + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd) + + p.wait() + for line in p.stdout: + print(line) + for line in p.stderr: + print(line) + assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % ( + p.returncode, + num_procs, + ) From 5aa83b7db79762fc52bb96392fde858270c76bbd Mon Sep 17 00:00:00 2001 From: lisawim Date: Fri, 31 May 2024 07:24:33 +0200 Subject: [PATCH 5/9] Removed print line --- pySDC/projects/DAE/tests/test_MPI_sweepers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pySDC/projects/DAE/tests/test_MPI_sweepers.py b/pySDC/projects/DAE/tests/test_MPI_sweepers.py index f298ec5fa9..223b19faf0 100644 --- a/pySDC/projects/DAE/tests/test_MPI_sweepers.py +++ b/pySDC/projects/DAE/tests/test_MPI_sweepers.py @@ -16,7 +16,7 @@ def testOrder(num_procs): my_env['PYTHONPATH'] = '../../..:.' my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml' cwd = '.' 
- print(my_env['PYTHONPATH']) + cmd = ('mpirun -np ' + str(num_procs) + ' python pySDC/projects/DAE/run/accuracy_check_MPI.py').split() p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd) From 2410c2a764d05c178b7b298ad80459621b03a0ba Mon Sep 17 00:00:00 2001 From: lisawim Date: Fri, 31 May 2024 15:43:58 +0200 Subject: [PATCH 6/9] Added test to check uend's from MPI and non-MPI versions + requested changes --- pySDC/projects/DAE/run/accuracy_check_MPI.py | 51 +++++++++---- .../DAE/sweepers/SemiImplicitDAEMPI.py | 6 +- .../DAE/sweepers/fully_implicit_DAE_MPI.py | 37 +-------- pySDC/projects/DAE/tests/test_MPI_sweepers.py | 75 ++++++++++++++++++- 4 files changed, 114 insertions(+), 55 deletions(-) diff --git a/pySDC/projects/DAE/run/accuracy_check_MPI.py b/pySDC/projects/DAE/run/accuracy_check_MPI.py index 59dc027dd1..c59a3aed0b 100644 --- a/pySDC/projects/DAE/run/accuracy_check_MPI.py +++ b/pySDC/projects/DAE/run/accuracy_check_MPI.py @@ -9,28 +9,42 @@ from pySDC.helpers.stats_helper import get_sorted -def run(comm, dt, semi_implicit, index_case): +def run(dt, num_nodes, use_MPI, semi_implicit, residual_type, index_case, comm=None): r""" Prepares the controller with all the description needed. Here, the function decides to choose the correct sweeper for the test. Parameters ---------- - comm : mpi4py.MPI.COMM_World - Communicator dt : float Time step size chosen for simulation. + num_nodes : int + Number of collocation nodes. + use_MPI : bool + If True, the MPI sweeper classes are used. semi_implicit : bool Modules are loaded either for fully-implicit case or semi-implicit case. + residual_type : str + Choose how to compute the residual. index_case : int Denotes the index case of a DAE to be tested here, can be either ``1`` or ``2``. + comm : mpi4py.MPI.COMM_WORLD + Communicator. """ - if semi_implicit: - from pySDC.projects.DAE.sweepers.SemiImplicitDAEMPI import SemiImplicitDAEMPI as sweeper + if not semi_implicit: + if use_MPI: + from pySDC.projects.DAE.sweepers.fully_implicit_DAE_MPI import fully_implicit_DAE_MPI as sweeper + + else: + from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE as sweeper else: - from pySDC.projects.DAE.sweepers.fully_implicit_DAE_MPI import fully_implicit_DAE_MPI as sweeper + if use_MPI: + from pySDC.projects.DAE.sweepers.SemiImplicitDAEMPI import SemiImplicitDAEMPI as sweeper + + else: + from pySDC.projects.DAE.sweepers.SemiImplicitDAE import SemiImplicitDAE as sweeper if index_case == 1: from pySDC.projects.DAE.problems.DiscontinuousTestDAE import DiscontinuousTestDAE as problem @@ -50,6 +64,7 @@ def run(comm, dt, semi_implicit, index_case): # initialize level parameters level_params = { 'restol': 1e-12, + 'residual_type': residual_type, 'dt': dt, } @@ -59,19 +74,19 @@ def run(comm, dt, semi_implicit, index_case): } # initialize sweeper parameters - comm = MPI.COMM_WORLD sweeper_params = { 'quad_type': 'RADAU-RIGHT', - 'num_nodes': comm.Get_size(), + 'num_nodes': num_nodes, 'QI': 'MIN-SR-S', # use a diagonal Q_Delta here! 'initial_guess': 'spread', - 'comm': comm, } # check if number of processes requested matches with number of nodes - assert ( - sweeper_params['num_nodes'] == comm.Get_size() - ), f"Number of nodes does not match with number of processes! Expected {sweeper_params['num_nodes']}, got {comm.Get_size()}!" 
+ if comm is not None: + sweeper_params.update({'comm': comm}) + assert ( + sweeper_params['num_nodes'] == comm.Get_size() + ), f"Number of nodes does not match with number of processes! Expected {sweeper_params['num_nodes']}, got {comm.Get_size()}!" # initialize step parameters step_params = { @@ -101,19 +116,25 @@ def run(comm, dt, semi_implicit, index_case): uinit = P.u_exact(t0) # call main function to get things done... - _, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + controller.MS[0].levels[0].sweep.compute_end_point() + + residual = controller.MS[0].levels[0].status.residual - return stats + return uend, residual, stats def check_order(comm): + num_nodes = comm.Get_size() + use_MPI = True + residual_type = 'full_abs' for semi_implicit in [False, True]: for index_case in [1, 2]: dt_list = np.logspace(-1.7, -1.0, num=5) errorsDiff, errorsAlg = np.zeros(len(dt_list)), np.zeros(len(dt_list)) for i, dt in enumerate(dt_list): - stats = run(comm, dt, semi_implicit, index_case) + _, _, stats = run(dt, num_nodes, use_MPI, semi_implicit, residual_type, index_case, comm) errorsDiff[i] = max( np.array( diff --git a/pySDC/projects/DAE/sweepers/SemiImplicitDAEMPI.py b/pySDC/projects/DAE/sweepers/SemiImplicitDAEMPI.py index c54cb12911..d038b81922 100644 --- a/pySDC/projects/DAE/sweepers/SemiImplicitDAEMPI.py +++ b/pySDC/projects/DAE/sweepers/SemiImplicitDAEMPI.py @@ -50,12 +50,10 @@ def integrate(self, last_only=False): me = P.dtype_u(P.init, val=0.0) for m in [self.coll.num_nodes - 1] if last_only else range(self.coll.num_nodes): - recvBufDiff = me.diff[:] if m == self.rank else None - recvBufAlg = me.alg[:] if m == self.rank else None integral = P.dtype_u(P.init, val=0.0) integral.diff[:] = L.dt * self.coll.Qmat[m + 1, self.rank + 1] * L.f[self.rank + 1].diff[:] - self.comm.Reduce(integral.diff[:], recvBufDiff, root=m, op=MPI.SUM) - self.comm.Reduce(integral.alg[:], recvBufAlg, root=m, op=MPI.SUM) + recvBuf = me[:] if m == self.rank else None + self.comm.Reduce(integral, recvBuf, root=m, op=MPI.SUM) return me diff --git a/pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py b/pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py index 1e01bea397..165792644d 100644 --- a/pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py +++ b/pySDC/projects/DAE/sweepers/fully_implicit_DAE_MPI.py @@ -13,13 +13,12 @@ class SweeperDAEMPI(SweeperMPI): >>> class fully_implicit_DAE_MPI(SweeperDAEMPI, generic_implicit_MPI, fully_implicit_DAE): - Be careful with ordering of classes where the child class should inherit from! Due to multple inheritance several methods are overwritten here. In the example above (which is the class below) the class first inherits the methods + Be careful with ordering of classes where the child class should inherit from! Due to multiple inheritance several methods are overwritten here. In the example above (which is the class below) the class first inherits the methods - compute_residual() - predict() - - compute_end_point() - from ``SweeperDAEMPI``. From second inherited class ``generic_implicit_MPI`` the methods + from ``SweeperDAEMPI``. ``compute_end_point()`` is inherited from ``SweeperMPI``. From second inherited class ``generic_implicit_MPI`` the methods - integrate() - update_nodes() @@ -32,9 +31,6 @@ class SweeperDAEMPI(SweeperMPI): Parameters passed to the sweeper. 
""" - def __init__(self, params): - super().__init__(params) - def compute_residual(self, stage=None): r""" Uses the absolute value of the DAE system @@ -110,35 +106,6 @@ def predict(self): L.status.unlocked = True L.status.updated = True - def compute_end_point(self): - """ - Compute u at the right point of the interval. - - The value uend computed here is a full evaluation of the Picard formulation unless do_full_update==False - - Returns - ------- - None - """ - - L = self.level - P = L.prob - L.uend = P.dtype_u(P.init, val=0.0) - - # check if Mth node is equal to right point and do_coll_update is false, perform a simple copy - if self.coll.right_is_node and not self.params.do_coll_update: - # a copy is sufficient - root = self.comm.Get_size() - 1 - if self.comm.rank == root: - L.uend = P.dtype_u(L.u[-1]) - # broadcast diff parts and alg parts separately - self.comm.Bcast(L.uend.diff, root=root) - self.comm.Bcast(L.uend.alg, root=root) - else: - raise NotImplementedError() - - return None - class fully_implicit_DAE_MPI(SweeperDAEMPI, generic_implicit_MPI, fully_implicit_DAE): r""" diff --git a/pySDC/projects/DAE/tests/test_MPI_sweepers.py b/pySDC/projects/DAE/tests/test_MPI_sweepers.py index 223b19faf0..08eff6fd3b 100644 --- a/pySDC/projects/DAE/tests/test_MPI_sweepers.py +++ b/pySDC/projects/DAE/tests/test_MPI_sweepers.py @@ -8,6 +8,7 @@ def testOrder(num_procs): Test checks if order of accuracy is reached for the MPI sweepers. """ + import pySDC.projects.DAE.run.accuracy_check_MPI as acc import os import subprocess @@ -17,7 +18,7 @@ def testOrder(num_procs): my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml' cwd = '.' - cmd = ('mpirun -np ' + str(num_procs) + ' python pySDC/projects/DAE/run/accuracy_check_MPI.py').split() + cmd = f"mpirun -np {num_procs} python {acc.__file__}".split() p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd) p.wait() @@ -29,3 +30,75 @@ def testOrder(num_procs): p.returncode, num_procs, ) + + +@pytest.mark.mpi4py +@pytest.mark.parametrize("num_nodes", [2]) +@pytest.mark.parametrize("residual_type", ['full_abs', 'last_abs', 'full_abs', 'full_rel']) +@pytest.mark.parametrize("semi_implicit", [True, False]) +@pytest.mark.parametrize("index_case", [1, 2]) +def testVersions(num_nodes, residual_type, semi_implicit, index_case, launch=True): + """ + Make a test if the result matches between the MPI and non-MPI versions of a sweeper. + Tests solution at the right end point and the residual. + + Args: + num_nodes (int): The number of nodes to use + quad_type (str): Type of nodes + residual_type (str): Type of residual computation + semi_implicit (bool): Use IMEX sweeper or not + launch (bool): If yes, it will launch `mpirun` with the required number of processes + """ + + if launch: + import os + import subprocess + + # Set python path once + my_env = os.environ.copy() + my_env['PYTHONPATH'] = '../../..:.' 
+ my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml' + cmd = f"mpirun -np {num_nodes} python {__file__} --testVersions {num_nodes} {residual_type} {semi_implicit} {index_case}".split() + + p = subprocess.Popen(cmd, env=my_env, cwd=".") + + p.wait() + assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % ( + p.returncode, + num_nodes, + ) + else: + import numpy as np + from pySDC.projects.DAE.run.accuracy_check_MPI import run + + semi_implicit = False if semi_implicit == 'False' else True + + dt = 0.1 + + MPI_uend, MPI_residual, _ = run( + dt=dt, + num_nodes=int(num_nodes), + use_MPI=True, + semi_implicit=semi_implicit, + residual_type=residual_type, + index_case=int(index_case), + ) + + nonMPI_uend, nonMPI_residual, _ = run( + dt=dt, + num_nodes=int(num_nodes), + use_MPI=True, + semi_implicit=semi_implicit, + residual_type=residual_type, + index_case=int(index_case), + ) + + assert np.allclose(MPI_uend, nonMPI_uend, atol=1e-14), 'Got different solutions at end point!' + assert np.allclose(MPI_residual, nonMPI_residual, atol=1e-14), 'Got different residuals!' + + +if __name__ == '__main__': + import sys + + if '--testVersions' in sys.argv: + testVersions(sys.argv[-4], sys.argv[-3], sys.argv[-2], sys.argv[-1], launch=False) From 8e4221d75453b5f5256f8924421045ce82ce0455 Mon Sep 17 00:00:00 2001 From: lisawim Date: Sat, 1 Jun 2024 19:13:08 +0200 Subject: [PATCH 7/9] What the jellyfish did I do here.. --- pySDC/projects/DAE/run/accuracy_check_MPI.py | 16 +++- pySDC/projects/DAE/tests/test_MPI_sweepers.py | 87 ++++++++++++------- 2 files changed, 69 insertions(+), 34 deletions(-) diff --git a/pySDC/projects/DAE/run/accuracy_check_MPI.py b/pySDC/projects/DAE/run/accuracy_check_MPI.py index c59a3aed0b..1834fa09ef 100644 --- a/pySDC/projects/DAE/run/accuracy_check_MPI.py +++ b/pySDC/projects/DAE/run/accuracy_check_MPI.py @@ -9,7 +9,7 @@ from pySDC.helpers.stats_helper import get_sorted -def run(dt, num_nodes, use_MPI, semi_implicit, residual_type, index_case, comm=None): +def run(dt, num_nodes, use_MPI, semi_implicit, residual_type, index_case, initial_guess='spread', comm=None): r""" Prepares the controller with all the description needed. Here, the function decides to choose the correct sweeper for the test. @@ -28,6 +28,8 @@ def run(dt, num_nodes, use_MPI, semi_implicit, residual_type, index_case, comm=N Choose how to compute the residual. index_case : int Denotes the index case of a DAE to be tested here, can be either ``1`` or ``2``. + initial_guess : str, optional + Type of initial guess for simulation. comm : mpi4py.MPI.COMM_WORLD Communicator. """ @@ -78,7 +80,7 @@ def run(dt, num_nodes, use_MPI, semi_implicit, residual_type, index_case, comm=N 'quad_type': 'RADAU-RIGHT', 'num_nodes': num_nodes, 'QI': 'MIN-SR-S', # use a diagonal Q_Delta here! 
- 'initial_guess': 'spread', + 'initial_guess': initial_guess, } # check if number of processes requested matches with number of nodes @@ -134,7 +136,15 @@ def check_order(comm): errorsDiff, errorsAlg = np.zeros(len(dt_list)), np.zeros(len(dt_list)) for i, dt in enumerate(dt_list): - _, _, stats = run(dt, num_nodes, use_MPI, semi_implicit, residual_type, index_case, comm) + _, _, stats = run( + dt=dt, + num_nodes=num_nodes, + use_MPI=use_MPI, + semi_implicit=semi_implicit, + residual_type=residual_type, + index_case=index_case, + comm=comm, + ) errorsDiff[i] = max( np.array( diff --git a/pySDC/projects/DAE/tests/test_MPI_sweepers.py b/pySDC/projects/DAE/tests/test_MPI_sweepers.py index 08eff6fd3b..03f35b2ae0 100644 --- a/pySDC/projects/DAE/tests/test_MPI_sweepers.py +++ b/pySDC/projects/DAE/tests/test_MPI_sweepers.py @@ -34,20 +34,29 @@ def testOrder(num_procs): @pytest.mark.mpi4py @pytest.mark.parametrize("num_nodes", [2]) -@pytest.mark.parametrize("residual_type", ['full_abs', 'last_abs', 'full_abs', 'full_rel']) +@pytest.mark.parametrize("residual_type", ['full_abs', 'last_abs', 'full_abs', 'last_rel']) @pytest.mark.parametrize("semi_implicit", [True, False]) @pytest.mark.parametrize("index_case", [1, 2]) -def testVersions(num_nodes, residual_type, semi_implicit, index_case, launch=True): - """ +@pytest.mark.parametrize("initial_guess", ['spread', 'zero', 'something_else']) +def testVersions(num_nodes, residual_type, semi_implicit, index_case, initial_guess, launch=True): + r""" Make a test if the result matches between the MPI and non-MPI versions of a sweeper. Tests solution at the right end point and the residual. - Args: - num_nodes (int): The number of nodes to use - quad_type (str): Type of nodes - residual_type (str): Type of residual computation - semi_implicit (bool): Use IMEX sweeper or not - launch (bool): If yes, it will launch `mpirun` with the required number of processes + Parameters + ---------- + num_nodes : int + Number of collocation nodes to use. + residual_type : str + Type of residual computation. + semi_implicit : bool + If True, semi-implicit sweeper is used. + index_case : int + Case of DAE index. Choose either between :math:`1` or :math:`2`. + initial_guess : str + Type of initial guess for simulation. + launch : bool + If yes, it will launch `mpirun` with the required number of processes """ if launch: @@ -58,7 +67,7 @@ def testVersions(num_nodes, residual_type, semi_implicit, index_case, launch=Tru my_env = os.environ.copy() my_env['PYTHONPATH'] = '../../..:.' 
my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml' - cmd = f"mpirun -np {num_nodes} python {__file__} --testVersions {num_nodes} {residual_type} {semi_implicit} {index_case}".split() + cmd = f"mpirun -np {num_nodes} python {__file__} --testVersions {num_nodes} {residual_type} {semi_implicit} {index_case} {initial_guess}".split() p = subprocess.Popen(cmd, env=my_env, cwd=".") @@ -70,35 +79,51 @@ def testVersions(num_nodes, residual_type, semi_implicit, index_case, launch=Tru else: import numpy as np from pySDC.projects.DAE.run.accuracy_check_MPI import run + from pySDC.core.Errors import ParameterError semi_implicit = False if semi_implicit == 'False' else True dt = 0.1 - MPI_uend, MPI_residual, _ = run( - dt=dt, - num_nodes=int(num_nodes), - use_MPI=True, - semi_implicit=semi_implicit, - residual_type=residual_type, - index_case=int(index_case), - ) - - nonMPI_uend, nonMPI_residual, _ = run( - dt=dt, - num_nodes=int(num_nodes), - use_MPI=True, - semi_implicit=semi_implicit, - residual_type=residual_type, - index_case=int(index_case), - ) - - assert np.allclose(MPI_uend, nonMPI_uend, atol=1e-14), 'Got different solutions at end point!' - assert np.allclose(MPI_residual, nonMPI_residual, atol=1e-14), 'Got different residuals!' + if initial_guess == 'something_else': + with pytest.raises(ParameterError): + _, _, _ = run( + dt=dt, + num_nodes=int(num_nodes), + use_MPI=True, + semi_implicit=semi_implicit, + residual_type=residual_type, + index_case=int(index_case), + initial_guess=initial_guess, + ) + + else: + MPI_uend, MPI_residual, _ = run( + dt=dt, + num_nodes=int(num_nodes), + use_MPI=True, + semi_implicit=semi_implicit, + residual_type=residual_type, + index_case=int(index_case), + initial_guess=initial_guess, + ) + + nonMPI_uend, nonMPI_residual, _ = run( + dt=dt, + num_nodes=int(num_nodes), + use_MPI=False, + semi_implicit=semi_implicit, + residual_type=residual_type, + index_case=int(index_case), + initial_guess=initial_guess, + ) + + assert np.allclose(MPI_uend, nonMPI_uend, atol=1e-14), 'Got different solutions at end point!' + assert np.allclose(MPI_residual, nonMPI_residual, atol=1e-14), 'Got different residuals!' 
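+
+# For debugging outside of pytest, the MPI/non-MPI comparison can also be triggered directly,
+# for example (illustrative values, assuming pySDC is importable from the repository root):
+#
+#     mpirun -np 2 python pySDC/projects/DAE/tests/test_MPI_sweepers.py \
+#         --testVersions 2 full_abs False 1 spread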
if __name__ == '__main__': import sys if '--testVersions' in sys.argv: - testVersions(sys.argv[-4], sys.argv[-3], sys.argv[-2], sys.argv[-1], launch=False) + testVersions(sys.argv[-5], sys.argv[-4], sys.argv[-3], sys.argv[-2], sys.argv[-1], launch=False) From c5ec02eb4fff5fa0ac6187bc96cc4bb6de2fc9f9 Mon Sep 17 00:00:00 2001 From: lisawim Date: Sat, 1 Jun 2024 19:15:53 +0200 Subject: [PATCH 8/9] Changed parametrization in test --- pySDC/projects/DAE/tests/test_MPI_sweepers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pySDC/projects/DAE/tests/test_MPI_sweepers.py b/pySDC/projects/DAE/tests/test_MPI_sweepers.py index 03f35b2ae0..31ceffab53 100644 --- a/pySDC/projects/DAE/tests/test_MPI_sweepers.py +++ b/pySDC/projects/DAE/tests/test_MPI_sweepers.py @@ -34,7 +34,7 @@ def testOrder(num_procs): @pytest.mark.mpi4py @pytest.mark.parametrize("num_nodes", [2]) -@pytest.mark.parametrize("residual_type", ['full_abs', 'last_abs', 'full_abs', 'last_rel']) +@pytest.mark.parametrize("residual_type", ['full_abs', 'last_abs', 'full_rel', 'last_rel']) @pytest.mark.parametrize("semi_implicit", [True, False]) @pytest.mark.parametrize("index_case", [1, 2]) @pytest.mark.parametrize("initial_guess", ['spread', 'zero', 'something_else']) From 63318e57bfce7f7669620e52aa39f816d606e5f9 Mon Sep 17 00:00:00 2001 From: lisawim Date: Sun, 2 Jun 2024 18:09:40 +0200 Subject: [PATCH 9/9] Update README.rst --- pySDC/projects/DAE/README.rst | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pySDC/projects/DAE/README.rst b/pySDC/projects/DAE/README.rst index b58a6ead70..9116979daa 100644 --- a/pySDC/projects/DAE/README.rst +++ b/pySDC/projects/DAE/README.rst @@ -35,11 +35,19 @@ Project overview - | ``fully_implicit_dae_playground.py`` | Testing arena for the fully implicit sweeper. - | ``synchronous_machine_playground.py`` - | Testing arena for the synchronous machine model. + | Testing arena for the synchronous machine model. + - | ``accuracy_check_MPI.py`` + | Script checking the order of accuracy of MPI sweepers for DAEs of different indices. - sweepers - | ``fully_implicit_DAE.py`` | Sweeper that accepts a fully implicit formulation of a system of differential equations and applies to it a modified version of spectral deferred correction + - | ``SemiImplicitDAE.py`` + | SDC sweeper especially for semi-explicit DAEs. This sweeper is based on ideas mentioned in `Huang et al. (2007) `_. + - | ``fully_implicit_DAE_MPI.py`` + | MPI version of fully-implicit SDC-DAE sweeper. + - | ``SemiImplicitDAEMPI.py`` + | MPI version of semi-implicit SDC-DAE sweeper. Theoretical details ----------------------