Adds create_with_same_size function #15204

Open · wants to merge 4 commits into base: master
162 changes: 33 additions & 129 deletions include/deal.II/fe/fe_tools_extrapolate.templates.h
@@ -1479,132 +1479,49 @@ namespace FETools
template <class VectorType>
using BlockType = typename BlockTypeHelper<VectorType>::type;

template <class VectorType, class DH>
void
reinit_distributed(const DH &dh, VectorType &vector)
{
vector.reinit(dh.n_dofs());
}

#ifdef DEAL_II_WITH_PETSC
template <int dim, int spacedim>
void
reinit_distributed(const DoFHandler<dim, spacedim> &dh,
PETScWrappers::MPI::Vector & vector)
{
const parallel::distributed::Triangulation<dim, spacedim> *parallel_tria =
dynamic_cast<
const parallel::distributed::Triangulation<dim, spacedim> *>(
&dh.get_triangulation());
Assert(parallel_tria != nullptr, ExcNotImplemented());

const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
}
#endif // DEAL_II_WITH_PETSC

#ifdef DEAL_II_WITH_TRILINOS
template <int dim, int spacedim>
void
reinit_distributed(const DoFHandler<dim, spacedim> &dh,
TrilinosWrappers::MPI::Vector & vector)
template <class VectorType, int dim, int spacedim>
std::enable_if_t<!is_serial_vector<VectorType>::value, VectorType>
create_distributed_vector(const DoFHandler<dim, spacedim> &dh)
{
const parallel::distributed::Triangulation<dim, spacedim> *parallel_tria =
dynamic_cast<
const parallel::distributed::Triangulation<dim, spacedim> *>(
&dh.get_triangulation());
Assert(parallel_tria != nullptr, ExcNotImplemented());

const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
return VectorType(locally_owned_dofs, parallel_tria->get_communicator());
}



# ifdef DEAL_II_WITH_MPI
# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <int dim, int spacedim, typename Number>
void
reinit_distributed(const DoFHandler<dim, spacedim> & dh,
LinearAlgebra::TpetraWrappers::Vector<Number> &vector)
template <class VectorType, int dim, int spacedim>
std::enable_if_t<is_serial_vector<VectorType>::value, VectorType>
create_distributed_vector(const DoFHandler<dim, spacedim> &dh)
{
const parallel::distributed::Triangulation<dim, spacedim> *parallel_tria =
dynamic_cast<
const parallel::distributed::Triangulation<dim, spacedim> *>(
&dh.get_triangulation());
Assert(parallel_tria != nullptr, ExcNotImplemented());

const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
return VectorType(dh.n_dofs());
}
# endif

template <int dim, int spacedim>
void
reinit_distributed(const DoFHandler<dim, spacedim> & dh,
LinearAlgebra::EpetraWrappers::Vector &vector)
{
const parallel::distributed::Triangulation<dim, spacedim> *parallel_tria =
dynamic_cast<
const parallel::distributed::Triangulation<dim, spacedim> *>(
&dh.get_triangulation());
Assert(parallel_tria != nullptr, ExcNotImplemented());

const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
}
# endif
#endif // DEAL_II_WITH_TRILINOS

template <int dim, int spacedim, typename Number>
void
reinit_distributed(const DoFHandler<dim, spacedim> & dh,
LinearAlgebra::distributed::Vector<Number> &vector)
template <class VectorType, int dim, int spacedim>
std::enable_if_t<IsBlockVector<VectorType>::value, BlockType<VectorType>>
create_block_type(const VectorType &, const DoFHandler<dim, spacedim> &dh)
{
const parallel::distributed::Triangulation<dim, spacedim> *parallel_tria =
dynamic_cast<
const parallel::distributed::Triangulation<dim, spacedim> *>(
&dh.get_triangulation());
Assert(parallel_tria != nullptr, ExcNotImplemented());

const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
return create_distributed_vector<BlockType<VectorType>>(dh);
}



template <class VectorType, class DH>
void
reinit_ghosted(const DH & /*dh*/, VectorType & /*vector*/)
template <class VectorType, int dim, int spacedim>
std::enable_if_t<!IsBlockVector<VectorType>::value, BlockType<VectorType>>
create_block_type(const VectorType &v, const DoFHandler<dim, spacedim> &)
{
Assert(false, ExcNotImplemented());
return BlockType<VectorType>::create_with_same_size(v);
}

#ifdef DEAL_II_WITH_PETSC
template <int dim, int spacedim>
void
reinit_ghosted(const DoFHandler<dim, spacedim> &dh,
PETScWrappers::MPI::Vector & vector)
{
const parallel::distributed::Triangulation<dim, spacedim> *parallel_tria =
dynamic_cast<
const parallel::distributed::Triangulation<dim, spacedim> *>(
&dh.get_triangulation());
Assert(parallel_tria != nullptr, ExcNotImplemented());
const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
IndexSet locally_relevant_dofs;
DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs);
vector.reinit(locally_owned_dofs,
locally_relevant_dofs,
parallel_tria->get_communicator());
}
#endif // DEAL_II_WITH_PETSC

#ifdef DEAL_II_WITH_TRILINOS
template <int dim, int spacedim>
void
reinit_ghosted(const DoFHandler<dim, spacedim> &dh,
TrilinosWrappers::MPI::Vector & vector)

template <class VectorType, int dim, int spacedim>
std::enable_if_t<!is_serial_vector<VectorType>::value, VectorType>
create_distributed_vector_with_ghosts(const DoFHandler<dim, spacedim> &dh)
{
const parallel::distributed::Triangulation<dim, spacedim> *parallel_tria =
dynamic_cast<
@@ -1614,28 +1531,16 @@ namespace FETools
const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
IndexSet locally_relevant_dofs;
DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs);
vector.reinit(locally_owned_dofs,
locally_relevant_dofs,
parallel_tria->get_communicator());
return VectorType(locally_owned_dofs,
locally_relevant_dofs,
parallel_tria->get_communicator());
}
#endif // DEAL_II_WITH_TRILINOS

template <int dim, int spacedim, typename Number>
void
reinit_ghosted(const DoFHandler<dim, spacedim> & dh,
LinearAlgebra::distributed::Vector<Number> &vector)
template <class VectorType, int dim, int spacedim>
std::enable_if_t<is_serial_vector<VectorType>::value, VectorType>
create_distributed_vector_with_ghosts(const DoFHandler<dim, spacedim> &)
{
const parallel::distributed::Triangulation<dim, spacedim> *parallel_tria =
dynamic_cast<
const parallel::distributed::Triangulation<dim, spacedim> *>(
&dh.get_triangulation());
Assert(parallel_tria != nullptr, ExcNotImplemented());
const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
IndexSet locally_relevant_dofs;
DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs);
vector.reinit(locally_owned_dofs,
locally_relevant_dofs,
parallel_tria->get_communicator());
Assert(false, ExcNotImplemented());
}


@@ -1737,11 +1642,10 @@ namespace FETools
ExcGridNotRefinedAtLeastOnce());
}


internal::BlockType<OutVector> u3;
internal::reinit_distributed(dof2, u3);
if (dynamic_cast<const parallel::distributed::Triangulation<dim, spacedim>
*>(&dof2.get_triangulation()) != nullptr)
auto u3 = internal::create_block_type(u2, dof2);
if (auto parallel_tria = dynamic_cast<
const parallel::distributed::Triangulation<dim, spacedim> *>(
&dof2.get_triangulation()))
{
Assert(dof1.get_fe(0).reference_cell() ==
ReferenceCells::get_hypercube<dim>(),
@@ -1752,8 +1656,8 @@ namespace FETools

interpolate(dof1, u1, dof2, constraints, u3);

internal::BlockType<OutVector> u3_relevant;
internal::reinit_ghosted(dof2, u3_relevant);
auto u3_relevant = internal::create_distributed_vector_with_ghosts<
internal::BlockType<OutVector>>(dof2);
u3_relevant = u3;

internal::ExtrapolateImplementation<dim, spacedim, OutVector>
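The refactor above replaces the per-backend reinit_distributed and reinit_ghosted overloads with a single pair of function templates selected on is_serial_vector. As a rough illustration of that dispatch pattern, here is a minimal standalone sketch; SerialVec, DistributedVec, and Mesh are hypothetical stand-ins, and the real code keys on dealii::is_serial_vector and builds the parallel vector from the locally owned IndexSet and the MPI communicator.

#include <cstddef>
#include <type_traits>

struct SerialVec
{
  explicit SerialVec(std::size_t n) : size(n) {}
  std::size_t size;
};

struct DistributedVec
{
  explicit DistributedVec(std::size_t n) : size(n) {}
  std::size_t size;
};

// Trait standing in for dealii::is_serial_vector (simplified here).
template <class V>
struct is_serial_vector : std::false_type
{};
template <>
struct is_serial_vector<SerialVec> : std::true_type
{};

struct Mesh
{
  std::size_t n_dofs() const { return 100; }
};

// Chosen for distributed vector types; a real implementation would build
// the vector from the locally owned IndexSet and the MPI communicator.
template <class VectorType>
std::enable_if_t<!is_serial_vector<VectorType>::value, VectorType>
create_distributed_vector(const Mesh &mesh)
{
  return VectorType(mesh.n_dofs());
}

// Chosen for serial vector types: size by the global number of DoFs.
template <class VectorType>
std::enable_if_t<is_serial_vector<VectorType>::value, VectorType>
create_distributed_vector(const Mesh &mesh)
{
  return VectorType(mesh.n_dofs());
}

int main()
{
  Mesh mesh;
  auto a = create_distributed_vector<SerialVec>(mesh);      // serial overload
  auto b = create_distributed_vector<DistributedVec>(mesh); // parallel overload
  return (a.size == 100 && b.size == 100) ? 0 : 1;
}

Returning by value is what allows call sites such as auto u3 = internal::create_block_type(u2, dof2); to replace the old declare-then-reinit pair.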
5 changes: 3 additions & 2 deletions include/deal.II/fe/fe_tools_interpolate.templates.h
@@ -704,8 +704,9 @@ namespace FETools
// and we cannot use the sadd function directly, so we have to create
// a completely distributed vector first and copy the local entries
// from the vector with ghost entries
TrilinosWrappers::MPI::Vector u1_completely_distributed;
u1_completely_distributed.reinit(u1_difference, true);
auto u1_completely_distributed =
TrilinosWrappers::MPI::Vector::create_with_same_size(u1_difference,
true);
u1_completely_distributed = u1;

u1_difference.sadd(-1, u1_completely_distributed);
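For context, create_with_same_size is a named-constructor idiom: build a new vector with the layout of an existing one instead of default-constructing and then calling reinit. A minimal sketch with a hypothetical MyVector type (the deal.II versions take the source vector and, for the Trilinos wrapper, an omit_zeroing_entries flag):

#include <cstddef>
#include <vector>

class MyVector
{
public:
  explicit MyVector(std::size_t n = 0) : data(n) {}

  // Named constructor: a vector sized (and, in the real classes,
  // partitioned) like an existing one.
  static MyVector create_with_same_size(const MyVector &other)
  {
    return MyVector(other.data.size());
  }

  std::size_t size() const { return data.size(); }

private:
  std::vector<double> data;
};

int main()
{
  MyVector u1_difference(10);
  // Before: MyVector v; v.reinit(u1_difference);
  // After:
  auto u1_completely_distributed =
    MyVector::create_with_same_size(u1_difference);
  return u1_completely_distributed.size() == 10 ? 0 : 1;
}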
45 changes: 21 additions & 24 deletions include/deal.II/lac/affine_constraints.templates.h
@@ -2471,12 +2471,11 @@ namespace internal
// this is an operation that is different for all vector types and so we
// need a few overloads
#ifdef DEAL_II_WITH_TRILINOS
inline void
inline TrilinosWrappers::MPI::Vector
import_vector_with_ghost_elements(
const TrilinosWrappers::MPI::Vector &vec,
const IndexSet & /*locally_owned_elements*/,
const IndexSet & needed_elements,
TrilinosWrappers::MPI::Vector &output,
const IndexSet &needed_elements,
const std::integral_constant<bool, false> /*is_block_vector*/)
{
Assert(!vec.has_ghost_elements(), ExcGhostsPresent());
@@ -2485,91 +2484,91 @@ namespace internal
dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());

Assert(mpi_comm != nullptr, ExcInternalError());
output.reinit(needed_elements, mpi_comm->GetMpiComm());
TrilinosWrappers::MPI::Vector output(needed_elements,
mpi_comm->GetMpiComm());
# else
output.reinit(needed_elements, MPI_COMM_SELF);
TrilinosWrappers::MPI::Vector output(needed_elements, MPI_COMM_SELF);
# endif
output = vec;
return output;
}
#endif

#ifdef DEAL_II_WITH_PETSC
inline void
inline PETScWrappers::MPI::Vector
import_vector_with_ghost_elements(
const PETScWrappers::MPI::Vector &vec,
const IndexSet & locally_owned_elements,
const IndexSet & needed_elements,
PETScWrappers::MPI::Vector & output,
const std::integral_constant<bool, false> /*is_block_vector*/)
{
output.reinit(locally_owned_elements,
needed_elements,
vec.get_mpi_communicator());
PETScWrappers::MPI::Vector output(locally_owned_elements,
needed_elements,
vec.get_mpi_communicator());
output = vec;
return output;
}
#endif

template <typename number>
void
LinearAlgebra::distributed::Vector<number>
import_vector_with_ghost_elements(
const LinearAlgebra::distributed::Vector<number> &vec,
const IndexSet & locally_owned_elements,
const IndexSet & needed_elements,
LinearAlgebra::distributed::Vector<number> & output,
const std::integral_constant<bool, false> /*is_block_vector*/)
{
// TODO: the in vector might already have all elements. need to find a
// way to efficiently avoid the copy then
const_cast<LinearAlgebra::distributed::Vector<number> &>(vec)
.zero_out_ghost_values();
output.reinit(locally_owned_elements,
needed_elements,
vec.get_mpi_communicator());
LinearAlgebra::distributed::Vector<number> output(
locally_owned_elements, needed_elements, vec.get_mpi_communicator());
output = vec;
output.update_ghost_values();
return output;
}

// all other non-block vector types are sequential and we should
// not have this function called at all -- so throw an exception
template <typename Vector>
void
Vector
import_vector_with_ghost_elements(
const Vector & /*vec*/,
const IndexSet & /*locally_owned_elements*/,
const IndexSet & /*needed_elements*/,
Vector & /*output*/,
const std::integral_constant<bool, false> /*is_block_vector*/)
{
Assert(false, ExcMessage("We shouldn't even get here!"));
}

// for block vectors, simply dispatch to the individual blocks
template <class VectorType>
void
VectorType
import_vector_with_ghost_elements(
const VectorType &vec,
const IndexSet & locally_owned_elements,
const IndexSet & needed_elements,
VectorType & output,
const std::integral_constant<bool, true> /*is_block_vector*/)
{
output.reinit(vec.n_blocks());
VectorType output(vec.n_blocks());

types::global_dof_index block_start = 0;
for (unsigned int b = 0; b < vec.n_blocks(); ++b)
{
import_vector_with_ghost_elements(
output.block(b) = import_vector_with_ghost_elements(
vec.block(b),
locally_owned_elements.get_view(block_start,
block_start + vec.block(b).size()),
needed_elements.get_view(block_start,
block_start + vec.block(b).size()),
output.block(b),
std::integral_constant<bool, false>());
block_start += vec.block(b).size();
}

output.collect_sizes();
return output;
}
} // namespace internal

@@ -2616,12 +2615,10 @@ AffineConstraints<number>::distribute(VectorType &vec) const
if (!vec_owned_elements.is_element(entry.first))
needed_elements.add_index(entry.first);

VectorType ghosted_vector;
internal::import_vector_with_ghost_elements(
auto ghosted_vector = internal::import_vector_with_ghost_elements(
vec,
vec_owned_elements,
needed_elements,
ghosted_vector,
std::integral_constant<bool, IsBlockVector<VectorType>::value>());

for (const ConstraintLine &line : lines)
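The helpers above were changed from filling an output argument to returning the ghosted vector by value, with std::integral_constant used to dispatch between block and non-block vectors. A self-contained sketch of that shape, using hypothetical FlatVec/BlockVec types rather than the deal.II wrappers:

#include <type_traits>
#include <vector>

struct FlatVec
{
  std::vector<double> v;
};

struct BlockVec
{
  std::vector<FlatVec> blocks;
};

// Non-block case: build and return the ghosted copy directly.
inline FlatVec
import_vector_with_ghost_elements(const FlatVec &vec,
                                  std::integral_constant<bool, false>)
{
  FlatVec output;
  output.v = vec.v; // a real version would also import the ghost entries
  return output;
}

// Block case: dispatch to the individual blocks and assemble the result.
inline BlockVec
import_vector_with_ghost_elements(const BlockVec &vec,
                                  std::integral_constant<bool, true>)
{
  BlockVec output;
  output.blocks.reserve(vec.blocks.size());
  for (const FlatVec &b : vec.blocks)
    output.blocks.push_back(
      import_vector_with_ghost_elements(b,
                                        std::integral_constant<bool, false>()));
  return output;
}

int main()
{
  BlockVec u{{FlatVec{{1.0, 2.0}}, FlatVec{{3.0}}}};
  auto ghosted =
    import_vector_with_ghost_elements(u, std::integral_constant<bool, true>());
  return ghosted.blocks.size() == 2 ? 0 : 1;
}

Returning the result is what lets AffineConstraints::distribute write auto ghosted_vector = internal::import_vector_with_ghost_elements(...), as in the final hunk above.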