Add mpi helper classes #163

Merged: 35 commits merged Nov 9, 2015
Commits (35)
1fad5c0
[tools] add mpi helper module
sdrave May 7, 2015
e2e8b1d
[vectorarrays] add mpi aware NumpyVectorArray
sdrave May 7, 2015
2fa2747
[vectorarrays] add MPIVectorArray
sdrave May 7, 2015
e42d30d
[tools.mpi] catch exceptions in event loop
sdrave Jun 3, 2015
81d3ee6
[vectorarrays.mpi] add MPIDistributed, MPIForbidCommunication, MPILoc…
sdrave Jun 3, 2015
dac8ccf
[vectorarrays.mpi] remove leftover code
sdrave Jun 3, 2015
d923fba
[discretizations] start work on MPIDiscretization
sdrave Jun 3, 2015
2c8c922
[vectorarrays.mpi] merge MPILocalSubtypes feature into MPIVectorArray
sdrave Jun 3, 2015
faf2ddd
[vectorarrays.mpi] add MPIVector, MPIDistributedVector, MPIVectorForb…
sdrave Jun 3, 2015
349bce0
[tools.mpi] add 'parallel' attribute
sdrave Jun 3, 2015
65d4c24
[vectorarrays] add MPIVectorArrayAutoComm, MPIVectorArrayNoComm, MPIV…
sdrave Jun 3, 2015
39ad70b
[tools.mpi] small tweaks
sdrave Jun 4, 2015
ecff759
[vectorarray.mpi] bugfix
sdrave Jun 4, 2015
395bdbf
[operators] add MPIOperator
sdrave Jun 4, 2015
2e4efc6
[discretizations] extend MPIDiscretization
sdrave Jun 4, 2015
762dc75
[mpi] fix typo in wrapper
renefritze Jun 5, 2015
53bc199
[vectorarray] implement components,amax for MPIVectorArrayAutoComm
sdrave Jun 6, 2015
7fd5189
[tools.mpi] allow to run script with command line options
sdrave Jun 6, 2015
fdde72a
[operators] add 'as_vector' to MPIOperator
sdrave Jun 9, 2015
7bc8e1b
[discretizations] add parameter_space to MPIDiscretization
sdrave Jun 9, 2015
c2d2d01
[operators] some fixes for MPIOperator
sdrave Jun 9, 2015
ce5372f
[tools.mpi] print tracebacks when catching exceptions on ranks
sdrave Jun 9, 2015
8c99b84
[tools.mpi] quit event loops when main script has finished
sdrave Jun 9, 2015
cd03c9e
[tools.mpi] fix 'quit' call in __main__
sdrave Jun 10, 2015
74cc5de
[vectorarray] fix MPIVectorArrayAutoComm.amax
sdrave Jun 12, 2015
29d03d9
[mpi] remove almost_equal from MPIVectorArray
sdrave Oct 7, 2015
136dcdd
[tools.mpi] fix typo
sdrave Oct 8, 2015
554f03b
[tools.mpi] remove method_callN* methods and add 'function_call'
sdrave Oct 8, 2015
32557f4
[docs] add documentation for pymor.tools.mpi
sdrave Oct 9, 2015
d16064b
[docs] add docs for pymor.vectorarrays.mpi
sdrave Oct 9, 2015
a795ab3
[docs] fixes for pymor.tools.mpi docs
sdrave Oct 9, 2015
be6ef8f
[MPIOperator] minor tweak
sdrave Oct 20, 2015
7015744
[docs] add documentation for MPIOperator
sdrave Oct 20, 2015
a31d42c
[docs] add documentation for pymor.discretizations.mpi
sdrave Nov 6, 2015
493f49e
Merge branch 'master' into mpi_merge
renefritze Nov 6, 2015
159 changes: 159 additions & 0 deletions src/pymor/discretizations/mpi.py
@@ -0,0 +1,159 @@
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function

from pymor.core.interfaces import ImmutableInterface
from pymor.discretizations.basic import DiscretizationBase
from pymor.operators.mpi import mpi_wrap_operator
from pymor.tools import mpi
from pymor.vectorarrays.interfaces import VectorSpace
from pymor.vectorarrays.mpi import MPIVectorArray


class MPIDiscretization(DiscretizationBase):
"""MPI distributed discretization.

Given a single-rank implementation of a |Discretization|, this
wrapper class uses the event loop from :mod:`pymor.tools.mpi`
to allow MPI distributed usage of the |Discretization|.
The underlying implementation needs to be MPI aware.
In particular, the discretization's `solve` method has to
perform an MPI parallel solve of the discretization.

Note that this class is not intended to be instantiated directly.
Instead, you should use :func:`mpi_wrap_discretization`.

Parameters
----------
obj_id
:class:`~pymor.tools.mpi.ObjectId` of the local
|Discretization| on each rank.
operators
Dictionary of all |Operators| contained in the discretization,
wrapped for use on rank 0. Use :func:`mpi_wrap_discretization`
to automatically wrap all operators of a given MPI-aware
|Discretization|.
functionals
See `operators`.
vector_operators
See `operators`.
products
See `operators`.
array_type
This class will be used to wrap the local |VectorArrays|
returned by `solve` on each rank into an MPI distributed
|VectorArray| managed from rank 0. By default,
:class:`~pymor.vectorarrays.mpi.MPIVectorArray` will be used,
other options are :class:`~pymor.vectorarrays.mpi.MPIVectorArrayAutoComm`
and :class:`~pymor.vectorarrays.mpi.MPIVectorArrayNoComm`.
"""

def __init__(self, obj_id, operators, functionals, vector_operators, products=None, array_type=MPIVectorArray):
d = mpi.get_object(obj_id)
visualizer = MPIVisualizer(obj_id)
super(MPIDiscretization, self).__init__(operators, functionals, vector_operators, products=products,
visualizer=visualizer, cache_region=None, name=d.name)
self.obj_id = obj_id
subtypes = mpi.call(_MPIDiscretization_get_subtypes, obj_id)
if all(subtype == subtypes[0] for subtype in subtypes):
subtypes = (subtypes[0],)
self.solution_space = VectorSpace(array_type, (d.solution_space.type, subtypes))
self.build_parameter_type(inherits=(d,))
self.parameter_space = d.parameter_space

def _solve(self, mu=None):
space = self.solution_space
return space.type(space.subtype[0], space.subtype[1],
mpi.call(mpi.method_call_manage, self.obj_id, 'solve', mu=mu))

def __del__(self):
mpi.call(mpi.remove_object, self.obj_id)


def _MPIDiscretization_get_subtypes(self):
self = mpi.get_object(self)
subtypes = mpi.comm.gather(self.solution_space.subtype, root=0)
if mpi.rank0:
return tuple(subtypes)


class MPIVisualizer(ImmutableInterface):

def __init__(self, d_obj_id):
self.d_obj_id = d_obj_id

def visualize(self, U, d, **kwargs):
mpi.call(mpi.method_call, self.d_obj_id, 'visualize', U.obj_id, **kwargs)


def mpi_wrap_discretization(obj_id, use_with=False, with_apply2=False, array_type=MPIVectorArray):
"""Wrap MPI distributed local |Discretizations| to a global |Discretization| on rank 0.

Given MPI distributed local |Discretizations| referred to by the
`~pymor.tools.mpi.ObjectId` `obj_id`, return a new |Discretization|
which manages these distributed discretizations from rank 0. This
is done by first wrapping all |Operators| of the |Discretization| using
:func:`~pymor.operators.mpi.mpi_wrap_operator`.

When `use_with` is `False`, an :class:`MPIDiscretization` is instantiated
with the wrapped operators. A call to
:meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
will then use an MPI parallel call to the
:meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
methods of the wrapped local |Discretizations| to obtain the solution.
This is usually what you want when the actual solve is performed by
an implementation in the external solver.

When `use_with` is `True`, :meth:`~pymor.core.interfaces.ImmutableInterface.with_`
is called on the local |Discretization| on rank 0, to obtain a new
|Discretization| with the wrapped MPI |Operators|. This is mainly useful
when the local discretizations are generic |Discretizations| as in
:mod:`pymor.discretizations.basic` and
:meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
is implemented directly in pyMOR via operations on the contained
|Operators|.

Parameters
----------
obj_id
:class:`~pymor.tools.mpi.ObjectId` of the local |Discretization|
on each rank.
use_with
See above.
with_apply2
See :class:`~pymor.operators.mpi.MPIOperator`.
array_type
See :class:`~pymor.operators.mpi.MPIOperator`.
"""

operators, functionals, vectors, products = \
mpi.call(_mpi_wrap_discretization_manage_operators, obj_id)

operators = {k: mpi_wrap_operator(v, with_apply2=with_apply2, array_type=array_type) if v else None
for k, v in operators.iteritems()}
functionals = {k: mpi_wrap_operator(v, functional=True, with_apply2=with_apply2, array_type=array_type) if v else None
for k, v in functionals.iteritems()}
vectors = {k: mpi_wrap_operator(v, vector=True, with_apply2=with_apply2, array_type=array_type) if v else None
for k, v in vectors.iteritems()}
products = {k: mpi_wrap_operator(v, with_apply2=with_apply2, array_type=array_type) if v else None
for k, v in products.iteritems()} if products else None

if use_with:
d = mpi.get_object(obj_id)
visualizer = MPIVisualizer(obj_id)
return d.with_(operators=operators, functionals=functionals, vector_operators=vectors, products=products,
visualizer=visualizer, cache_region=None)
else:
return MPIDiscretization(obj_id, operators, functionals, vectors, products, array_type=array_type)


def _mpi_wrap_discretization_manage_operators(obj_id):
d = mpi.get_object(obj_id)
operators = {k: mpi.manage_object(v) if v else None for k, v in sorted(d.operators.iteritems())}
functionals = {k: mpi.manage_object(v) if v else None for k, v in sorted(d.functionals.iteritems())}
vectors = {k: mpi.manage_object(v) if v else None for k, v in sorted(d.vector_operators.iteritems())}
products = {k: mpi.manage_object(v) if v else None for k, v in sorted(d.products.iteritems())} if d.products else None
if mpi.rank0:
return operators, functionals, vectors, products
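
Usage note (not part of the diff): the sketch below illustrates how `mpi_wrap_discretization` might be driven from a script running under the `pymor.tools.mpi` event loop added in this PR. It is a minimal sketch, not a definitive recipe: the factory `build_local_discretization` is hypothetical and stands for whatever user code assembles the MPI-aware local Discretization on each rank, and the launch command in the comment only follows the pattern suggested by the tools.mpi commits above and may differ in detail.

# Hypothetical driver script, launched with something like
#     mpirun -n 4 python -m pymor.tools.mpi my_script.py
# so that rank 0 executes this code while the remaining ranks serve the
# pymor.tools.mpi event loop (see the tools.mpi commits in this PR).

from pymor.discretizations.mpi import mpi_wrap_discretization
from pymor.tools import mpi


def build_local_discretization():
    # User-supplied placeholder (hypothetical name): assemble the MPI-aware
    # local Discretization on the current rank, e.g. via an external solver binding.
    raise NotImplementedError


def setup():
    # Executed on every rank via mpi.call; the local discretization is
    # registered with manage_object so that rank 0 can refer to it by ObjectId.
    d = build_local_discretization()
    return mpi.manage_object(d)


if __name__ == '__main__':
    obj_id = mpi.call(setup)                     # rank-0 return value: ObjectId of the local d
    d = mpi_wrap_discretization(obj_id,          # wraps operators/functionals/products for rank 0
                                use_with=False)  # use_with=True would instead call with_ on the rank-0 local d
    U = d.solve()                                # dispatches an MPI parallel solve on all ranks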