MGTwoLevelTransfer: enable FE_Nothing #12891

Merged 1 commit on Oct 28, 2021
include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h (74 additions, 33 deletions)

@@ -1632,14 +1632,31 @@ namespace internal
transfer.n_components =
dof_handler_fine.get_fe_collection().n_components();

// TODO: replace with std::all_of once FECollection supports range-based
// iteration
const auto all_of = [](const auto &fe_collection, const auto &fu) {
for (unsigned int i = 0; i < fe_collection.size(); ++i)
if (fu(fe_collection[i]) == false)
return false;

return true;
};

transfer.fine_element_is_continuous =
dof_handler_fine.get_fe(0).n_dofs_per_vertex() > 0;
all_of(dof_handler_fine.get_fe_collection(), [](const auto &fe) {
return fe.n_dofs_per_cell() == 0 || fe.n_dofs_per_vertex() > 0;
});

for (unsigned int i = 0; i < dof_handler_fine.get_fe_collection().size();
++i)
Assert(transfer.fine_element_is_continuous ==
(dof_handler_fine.get_fe(i).n_dofs_per_vertex() > 0),
ExcInternalError());
#if DEBUG
const bool fine_element_is_discontinuous =
all_of(dof_handler_fine.get_fe_collection(), [](const auto &fe) {
return fe.n_dofs_per_cell() == 0 || fe.n_dofs_per_vertex() == 0;
});

Assert(transfer.fine_element_is_continuous !=
fine_element_is_discontinuous,
ExcNotImplemented());
#endif
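
A hypothetical sketch of the std::all_of variant the TODO above refers to, assuming hp::FECollection one day exposes begin()/end() iterators (it does not at the time of this PR; fe_collection is short for dof_handler_fine.get_fe_collection()):

// Hypothetical: hp::FECollection does not yet provide the iterators
// std::all_of needs, hence the hand-rolled all_of lambda above.
transfer.fine_element_is_continuous =
  std::all_of(fe_collection.begin(),
              fe_collection.end(),
              [](const auto &fe) {
                return fe.n_dofs_per_cell() == 0 ||
                       fe.n_dofs_per_vertex() > 0;
              });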

const auto process_cells = [&](const auto &fu) {
loop_over_active_or_level_cells(
@@ -1757,7 +1774,7 @@ namespace internal
.reference_cell();

Assert(reference_cell ==
dof_handler_coarse.get_fe(fe_index_pair.first.second)
dof_handler_coarse.get_fe(fe_index_pair.first.first)
.reference_cell(),
ExcNotImplemented());
Comment on lines 1776 to 1779

Member: So this was a bug before, but we never actually triggered the alternative element?

Member (Author): Yes. It was "just" an assert ;)
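
A minimal sketch of the indexing convention behind the fix, as the surrounding hunks suggest (names as in the hunk above): the map key fe_index_pair.first is the pair (coarse_fe_index, fine_fe_index), so the coarse DoFHandler has to be queried with the first component:

// fe_index_pair.first == (coarse_fe_index, fine_fe_index); the old code
// asked the coarse DoFHandler for the fine index, which went unnoticed
// because this Assert apparently never fired with differing indices.
const unsigned int coarse_fe_index = fe_index_pair.first.first;
const unsigned int fine_fe_index   = fe_index_pair.first.second;
Assert(dof_handler_coarse.get_fe(coarse_fe_index).reference_cell() ==
         dof_handler_fine.get_fe(fine_fe_index).reference_cell(),
       ExcNotImplemented());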


@@ -1899,13 +1916,17 @@ namespace internal
.reference_cell();

Assert(reference_cell ==
dof_handler_coarse.get_fe(fe_index_pair_.first.second)
dof_handler_coarse.get_fe(fe_index_pair_.first.first)
.reference_cell(),
ExcNotImplemented());

if (reference_cell == ReferenceCells::get_hypercube<dim>() &&
(dof_handler_coarse.get_fe(fe_index_pair.first) !=
dof_handler_fine.get_fe(fe_index_pair.second)))
dof_handler_fine.get_fe(fe_index_pair.second)) &&
(dof_handler_coarse.get_fe(fe_index_pair.first)
.n_dofs_per_cell() != 0 &&
dof_handler_fine.get_fe(fe_index_pair.second)
.n_dofs_per_cell() != 0))
{
const auto fe_fine = create_1D_fe(
dof_handler_fine.get_fe(fe_index_pair.second).base_element(0));
@@ -1979,7 +2000,11 @@ namespace internal
}
else if (reference_cell != ReferenceCells::get_hypercube<dim>() &&
(dof_handler_coarse.get_fe(fe_index_pair.first) !=
dof_handler_fine.get_fe(fe_index_pair.second)))
dof_handler_fine.get_fe(fe_index_pair.second)) &&
(dof_handler_coarse.get_fe(fe_index_pair.first)
.n_dofs_per_cell() != 0 &&
dof_handler_fine.get_fe(fe_index_pair.second)
.n_dofs_per_cell() != 0))
{
const auto &fe_fine =
dof_handler_fine.get_fe(fe_index_pair.second).base_element(0);
@@ -2336,14 +2361,21 @@ MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>::
{
for (unsigned int cell = 0; cell < scheme.n_coarse_cells; ++cell)
{
if (fine_element_is_continuous)
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i)
vec_fine_ptr->local_element(indices_fine[i]) +=
read_dof_values(indices_coarse[i], vec_coarse) * weights[i];
else
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i)
vec_fine_ptr->local_element(indices_fine[i]) +=
read_dof_values(indices_coarse[i], vec_coarse);
if ((scheme.n_dofs_per_cell_fine != 0) &&
(scheme.n_dofs_per_cell_coarse != 0))
{
if (fine_element_is_continuous)
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine;
++i)
vec_fine_ptr->local_element(indices_fine[i]) +=
read_dof_values(indices_coarse[i], vec_coarse) *
weights[i];
else
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine;
++i)
vec_fine_ptr->local_element(indices_fine[i]) +=
read_dof_values(indices_coarse[i], vec_coarse);
}

indices_fine += scheme.n_dofs_per_cell_fine;
indices_coarse += scheme.n_dofs_per_cell_coarse;
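
The same guard recurs in this hunk (prolongation) and in the restriction and interpolation hunks below. Condensed to its essence (a sketch, not the verbatim library code): the per-cell transfer is skipped whenever either side is FE_Nothing, i.e. contributes zero DoFs per cell, while the pointers into the packed index arrays still advance by the scheme's counts so they stay in sync across cells.

// Sketch of the recurring FE_Nothing guard: skip empty schemes ...
if (scheme.n_dofs_per_cell_fine != 0 && scheme.n_dofs_per_cell_coarse != 0)
  {
    // ... transfer values between coarse and fine cell DoFs ...
  }
// ... but always advance the packed index pointers.
indices_fine += scheme.n_dofs_per_cell_fine;
indices_coarse += scheme.n_dofs_per_cell_coarse;
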
@@ -2488,18 +2520,25 @@ MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>::
{
for (unsigned int cell = 0; cell < scheme.n_coarse_cells; ++cell)
{
if (fine_element_is_continuous)
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i)
distribute_local_to_global(
indices_coarse[i],
vec_fine_ptr->local_element(indices_fine[i]) * weights[i],
this->vec_coarse);
else
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i)
distribute_local_to_global(indices_coarse[i],
vec_fine_ptr->local_element(
indices_fine[i]),
this->vec_coarse);
if ((scheme.n_dofs_per_cell_fine != 0) &&
(scheme.n_dofs_per_cell_coarse != 0))
{
if (fine_element_is_continuous)
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine;
++i)
distribute_local_to_global(indices_coarse[i],
vec_fine_ptr->local_element(
indices_fine[i]) *
weights[i],
this->vec_coarse);
else
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine;
++i)
distribute_local_to_global(indices_coarse[i],
vec_fine_ptr->local_element(
indices_fine[i]),
this->vec_coarse);
}

indices_fine += scheme.n_dofs_per_cell_fine;
indices_coarse += scheme.n_dofs_per_cell_coarse;
@@ -2625,9 +2664,11 @@ MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>>::
{
for (unsigned int cell = 0; cell < scheme.n_coarse_cells; ++cell)
{
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i)
this->vec_coarse.local_element(indices_coarse[i]) =
vec_fine_ptr->local_element(indices_fine[i]);
if ((scheme.n_dofs_per_cell_fine != 0) &&
(scheme.n_dofs_per_cell_coarse != 0))
for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i)
this->vec_coarse.local_element(indices_coarse[i]) =
vec_fine_ptr->local_element(indices_fine[i]);

indices_fine += scheme.n_dofs_per_cell_fine;
indices_coarse += scheme.n_dofs_per_cell_coarse;
tests/multigrid-global-coarsening/fe_nothing_01.cc (new file, 201 additions)

@@ -0,0 +1,201 @@
// ---------------------------------------------------------------------
//
// Copyright (C) 2021 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


/**
* Test MGTwoLevelTransfer::interpolate() for DoFHandlers with `FE_Nothing`.
*/

#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/function_lib.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/quadrature_lib.h>

#include <deal.II/distributed/tria.h>

#include <deal.II/dofs/dof_handler.h>

#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_nothing.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/fe/mapping_q.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_out.h>

#include <deal.II/matrix_free/fe_evaluation.h>
#include <deal.II/matrix_free/matrix_free.h>

#include <deal.II/multigrid/mg_constrained_dofs.h>
#include <deal.II/multigrid/mg_transfer_global_coarsening.h>

#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/vector_tools.h>

#include "mg_transfer_util.h"

using namespace dealii;

template <int dim>
class AnalyticalSolution : public Function<dim>
{
public:
AnalyticalSolution()
{}

virtual double
value(const Point<dim> &p, const unsigned int component = 0) const override
{
(void)component;

return p[0];
}

private:
};
Comment on lines +54 to +70

Member: How about using

  Monomial(const Tensor<1, dim, Number> &exponents,
           const unsigned int n_components = 1);

with exponents = {1, 0, 0}?
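
The test below adopts this suggestion (see the Tensor<1, dim, Number> exp set up in do_test()); for reference, a minimal usage sketch:

// exponents = {1, 0, 0} yields the monomial x^1 * y^0 * z^0 == x,
// the same function the AnalyticalSolution class above encodes.
Tensor<1, dim, double> exponents;
exponents[0] = 1.0;
Functions::Monomial<dim> solution(exponents);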


template <int dim, typename Number = double>
void
do_test(const hp::FECollection<dim> &fe_fine,
const hp::FECollection<dim> &fe_coarse)
{
parallel::distributed::Triangulation<dim> tria(MPI_COMM_WORLD);

// create grid
GridGenerator::hyper_cube(tria);
tria.refine_global(2);

// setup dof-handlers
DoFHandler<dim> dof_handler_fine(tria);

if (fe_fine.size() > 1)
{
for (const auto &cell : dof_handler_fine.active_cell_iterators())
if (cell->center()[0] < 0.5)
cell->set_active_fe_index(0);
else
cell->set_active_fe_index(1);
}

dof_handler_fine.distribute_dofs(fe_fine);

DoFHandler<dim> dof_handler_coarse(tria);

if (fe_coarse.size() > 1)
{
for (const auto &cell : dof_handler_coarse.active_cell_iterators())
if (cell->center()[0] < 0.5)
cell->set_active_fe_index(0);
else
cell->set_active_fe_index(1);
}
dof_handler_coarse.distribute_dofs(fe_coarse);

AffineConstraints<Number> dummy;
dummy.close();

// setup transfer operator
MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>> transfer;
transfer.reinit_polynomial_transfer(dof_handler_fine,
dof_handler_coarse,
dummy,
dummy);

LinearAlgebra::distributed::Vector<Number> vec_fine(
dof_handler_fine.n_dofs());
LinearAlgebra::distributed::Vector<Number> vec_coarse(
dof_handler_coarse.n_dofs());

Tensor<1, dim, Number> exp;
exp[0] = 1.0;

VectorTools::interpolate(dof_handler_fine,
Functions::Monomial<dim>(exp),
vec_fine);

transfer.interpolate(vec_coarse, vec_fine);

DataOut<dim> data_out;

data_out.add_data_vector(dof_handler_coarse, vec_coarse, "solution_coarse");
data_out.add_data_vector(dof_handler_fine, vec_fine, "solution_fine");

data_out.build_patches();

static unsigned int counter = 0;

#if 0
data_out.write_vtu_with_pvtu_record(
"./", "result", counter++, tria.get_communicator(), 3, 1);
#else
deallog << std::endl;
data_out.write_vtk(deallog.get_file_stream());
#endif
}

template <int dim, typename Number>
void
test(const FiniteElement<dim> &fe_fine, const FiniteElement<dim> &fe_coarse)
{
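// Cover all four combinations of plain collections and collections
// augmented by FE_Nothing on the fine and/or coarse side.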
deallog.push("0");
do_test(hp::FECollection<dim>(fe_fine), hp::FECollection<dim>(fe_coarse));
deallog.pop();
deallog.push("1");
do_test(hp::FECollection<dim>(fe_fine, FE_Nothing<dim>()),
hp::FECollection<dim>(fe_coarse));
deallog.pop();
deallog.push("2");
do_test(hp::FECollection<dim>(fe_fine),
hp::FECollection<dim>(fe_coarse, FE_Nothing<dim>()));
deallog.pop();
deallog.push("3");
do_test(hp::FECollection<dim>(fe_fine, FE_Nothing<dim>()),
hp::FECollection<dim>(fe_coarse, FE_Nothing<dim>()));
deallog.pop();
}

template <int dim, typename Number>
void
test()
{
const unsigned int fe_degree = 2;

deallog.push("CG<->CG");
test<dim, Number>(FE_Q<dim>(fe_degree), FE_Q<dim>(fe_degree));
deallog.pop();
deallog.push("DG<->CG");
test<dim, Number>(FE_DGQ<dim>(fe_degree), FE_Q<dim>(fe_degree));
deallog.pop();
deallog.push("CG<->DG");
test<dim, Number>(FE_Q<dim>(fe_degree), FE_DGQ<dim>(fe_degree));
deallog.pop();
deallog.push("DG<->DG");
test<dim, Number>(FE_DGQ<dim>(fe_degree), FE_DGQ<dim>(fe_degree));
deallog.pop();
}

int
main(int argc, char **argv)
{
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
MPILogInitAll all;

deallog.precision(8);

test<2, double>();
}