Setup periodic boundary conditions in a scalable way
when mesh is generated in parallel

Refs idaholab#15501
fdkong authored and milljm committed Jul 15, 2020
1 parent 20abaf2 commit c91b1d6
Showing 11 changed files with 338 additions and 24 deletions.
11 changes: 11 additions & 0 deletions framework/include/mesh/MooseMesh.h
@@ -630,6 +630,11 @@ class MooseMesh : public MooseObject, public Restartable, public PerfGraphInterf
*/
void ghostGhostedBoundaries();

/**
* Set whether or not we want to ghost ghosted boundaries
*/
void needGhostGhostedBoundaries(bool needghost) { _need_ghost_ghosted_boundaries = needghost; }

/**
* Getter for the patch_size parameter.
*/
@@ -1419,6 +1424,12 @@ class MooseMesh : public MooseObject, public Restartable, public PerfGraphInterf

/// Set of elements ghosted by ghostGhostedBoundaries
std::set<Elem *> _ghost_elems_from_ghost_boundaries;

/// A parallel mesh generator such as DistributedRectilinearMeshGenerator
/// already makes everything ready, so we do not need to gather all boundaries to
/// every single processor. In general, we should avoid using ghostGhostedBoundaries
/// when possible since it is not scalable
bool _need_ghost_ghosted_boundaries;
};

/**
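For context, a minimal sketch (hypothetical, not part of the commit) of how a generator would use the new setter; `setupScalableGenerator` is an invented name, and the real call appears in DistributedRectilinearMeshGenerator::generate() further below:

// Hypothetical helper illustrating the intended call pattern; assumes
// MooseMesh.h is included. A generator that ghosts its own boundaries
// opts out of the unscalable all-gather in ghostGhostedBoundaries().
void setupScalableGenerator(MooseMesh & moose_mesh)
{
  moose_mesh.needGhostGhostedBoundaries(false);
}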
81 changes: 78 additions & 3 deletions framework/src/mesh/MooseMesh.C
@@ -256,7 +256,8 @@ MooseMesh::MooseMesh(const InputParameters & parameters)
_init_timer(registerTimedSection("init", 2)),
_read_recovered_mesh_timer(registerTimedSection("readRecoveredMesh", 2)),
_ghost_ghosted_boundaries_timer(registerTimedSection("GhostGhostedBoundaries", 3)),
_need_delete(false)
_need_delete(false),
_need_ghost_ghosted_boundaries(true)
{
if (isParamValid("ghosting_patch_size") && (_patch_update_strategy != Moose::Iteration))
mooseError("Ghosting patch size parameter has to be set in the mesh block "
@@ -317,7 +318,8 @@ MooseMesh::MooseMesh(const MooseMesh & other_mesh)
_init_timer(registerTimedSection("init", 2)),
_read_recovered_mesh_timer(registerTimedSection("readRecoveredMesh", 2)),
_ghost_ghosted_boundaries_timer(registerTimedSection("GhostGhostedBoundaries", 3)),
_need_delete(other_mesh._need_delete)
_need_delete(other_mesh._need_delete),
_need_ghost_ghosted_boundaries(other_mesh._need_ghost_ghosted_boundaries)
{
// Note: this calls BoundaryInfo::operator= without changing the
// ownership semantics of either Mesh's BoundaryInfo object.
@@ -1569,6 +1571,75 @@ MooseMesh::detectPairedSidesets()
}
}

// For a distributed mesh, boundaries may be distributed as well, so we
// collect information from everyone.
// If we already performed ghostGhostedBoundaries, all boundaries are gathered
// to every single processor, so we do not gather boundary ids here
if (_use_distributed_mesh && !_need_ghost_ghosted_boundaries)
{
// Pack all data together so that we send them via one communication
// pair: boundary side --> boundary ids.
std::vector<std::pair<boundary_id_type, boundary_id_type>> vecdata;
// We check boundaries on all dimensions
for (unsigned side_dim = 0; side_dim < dim; ++side_dim)
{
// "6" means: we have at most 6 boundaries. It is true for generated simple mesh
// "detectPairedSidesets" is designed for only simple meshes
for (auto bd = minus_x_ids[side_dim].begin(); bd != minus_x_ids[side_dim].end(); bd++)
vecdata.emplace_back(side_dim * 6 + 0, *bd);

for (auto bd = plus_x_ids[side_dim].begin(); bd != plus_x_ids[side_dim].end(); bd++)
vecdata.emplace_back(side_dim * 6 + 1, *bd);

for (auto bd = minus_y_ids[side_dim].begin(); bd != minus_y_ids[side_dim].end(); bd++)
vecdata.emplace_back(side_dim * 6 + 2, *bd);

for (auto bd = plus_y_ids[side_dim].begin(); bd != plus_y_ids[side_dim].end(); bd++)
vecdata.emplace_back(side_dim * 6 + 3, *bd);

for (auto bd = minus_z_ids[side_dim].begin(); bd != minus_z_ids[side_dim].end(); bd++)
vecdata.emplace_back(side_dim * 6 + 4, *bd);

for (auto bd = plus_z_ids[side_dim].begin(); bd != plus_z_ids[side_dim].end(); bd++)
vecdata.emplace_back(side_dim * 6 + 5, *bd);
}

_communicator.allgather(vecdata, false);

// Unpack data, and add them into minus/plus_x/y/z_ids
for (auto pair = vecdata.begin(); pair != vecdata.end(); pair++)
{
// Convert data from the long vector, and add data to separate sets
auto side_dim = pair->first / 6;
auto side = pair->first % 6;

switch (side)
{
case 0:
minus_x_ids[side_dim].insert(pair->second);
break;
case 1:
plus_x_ids[side_dim].insert(pair->second);
break;
case 2:
minus_y_ids[side_dim].insert(pair->second);
break;
case 3:
plus_y_ids[side_dim].insert(pair->second);
break;
case 4:
minus_z_ids[side_dim].insert(pair->second);
break;
case 5:
plus_z_ids[side_dim].insert(pair->second);
break;
default:
mooseError("Unknown boundary side ", side);
}
}

} // end if (_use_distributed_mesh && !_need_ghost_ghosted_boundaries)

for (unsigned side_dim = 0; side_dim < dim; ++side_dim)
{
// If unique pairings were found, fill up the _paired_boundary data
@@ -2559,7 +2630,10 @@ void
MooseMesh::ghostGhostedBoundaries()
{
// No need to do this if using a serial mesh
if (!_use_distributed_mesh)
// We do not need to ghost boundary elements when _need_ghost_ghosted_boundaries
// is false. _need_ghost_ghosted_boundaries can be set to false by a mesh generator
// that has already ghosted the boundaries accordingly
if (!_use_distributed_mesh || !_need_ghost_ghosted_boundaries)
return;

TIME_SECTION(_ghost_ghosted_boundaries_timer);
@@ -2621,6 +2695,7 @@ MooseMesh::ghostGhostedBoundaries()
connected_nodes_to_ghost.begin(),
connected_nodes_to_ghost.end(),
extra_ghost_elem_inserter<Node>(mesh));

mesh.comm().allgather_packed_range(&mesh,
boundary_elems_to_ghost.begin(),
boundary_elems_to_ghost.end(),
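As an aside, a standalone sketch (not part of the commit) of the pack/unpack round trip used in detectPairedSidesets above: the pair (side_dim, side) is flattened to side_dim * 6 + side, everything travels through one allgather, and integer division and modulo recover the original indices.

#include <cassert>
#include <utility>
#include <vector>

int main()
{
  using boundary_id_type = int; // stand-in for libMesh's boundary_id_type

  // Pack: boundary id 7 sits on the plus_y side (side index 3) for side_dim 1
  std::vector<std::pair<int, boundary_id_type>> vecdata;
  const int side_dim = 1, side = 3;
  const boundary_id_type bd_id = 7;
  vecdata.emplace_back(side_dim * 6 + side, bd_id);

  // ... in MOOSE the vector would now go through _communicator.allgather ...

  // Unpack: integer division recovers the dimension, modulo recovers the side
  for (const auto & p : vecdata)
  {
    assert(p.first / 6 == side_dim);
    assert(p.first % 6 == side);
    assert(p.second == bd_id);
  }
  return 0;
}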
58 changes: 37 additions & 21 deletions framework/src/meshgenerators/DistributedRectilinearMeshGenerator.C
@@ -134,9 +134,19 @@ DistributedRectilinearMeshGenerator::getNeighbors<Edge2>(const dof_id_type nx,
const dof_id_type /*j*/,
const dof_id_type /*k*/,
std::vector<dof_id_type> & neighbors,
const bool /*corner*/)
const bool corner)

{
if (corner)
{
// The elements on the opposite side of the current boundary are required
// for periodic boundary conditions
neighbors[0] = (i - 1 + nx) % nx;
neighbors[1] = (i + 1 + nx) % nx;

return;
}

neighbors[0] = i - 1;
neighbors[1] = i + 1;

@@ -169,26 +179,17 @@ DistributedRectilinearMeshGenerator::getGhostNeighbors<Edge2>(const dof_id_type
const MeshBase & mesh,
std::set<dof_id_type> & ghost_elems)
{
auto & boundary_info = mesh.get_boundary_info();

std::vector<dof_id_type> neighbors(2);

for (auto elem_ptr : mesh.element_ptr_range())
{
for (unsigned int s = 0; s < elem_ptr->n_sides(); s++)
{
// No current neighbor
if (!elem_ptr->neighbor_ptr(s))
{
// Not on a boundary
if (!boundary_info.n_boundary_ids(elem_ptr, s))
{
getNeighbors<Edge2>(nx, 0, 0, elem_ptr->id(), 0, 0, neighbors, false);
auto elem_id = elem_ptr->id();

ghost_elems.insert(neighbors[s]);
}
}
}
getNeighbors<Edge2>(nx, 0, 0, elem_id, 0, 0, neighbors, true);

for (auto neighbor : neighbors)
if (neighbor != Elem::invalid_id && !mesh.query_elem_ptr(neighbor))
ghost_elems.insert(neighbor);
}
}

@@ -337,11 +338,14 @@ DistributedRectilinearMeshGenerator::getNeighbors<Quad4>(const dof_id_type nx,
if (corner)
{
// libMesh dof_id_type looks like unsigned int
// We add one layer of point neighbors by default. In addition,
// the elements on the opposite side of the current boundary are included
// in case periodic boundary conditions are requested. The overhead is negligible
// since every element ends up with roughly the same number of neighbors
unsigned int nnb = 0;
for (unsigned int ii = 0; ii <= 2; ii++)
for (unsigned int jj = 0; jj <= 2; jj++)
if ((i + ii >= 1) && (i + ii <= nx) && (j + jj >= 1) && (j + jj <= ny))
neighbors[nnb++] = elemId<Quad4>(nx, 0, i + ii - 1, j + jj - 1, 0);
neighbors[nnb++] = elemId<Quad4>(nx, 0, (i + ii - 1 + nx) % nx, (j + jj - 1 + ny) % ny, 0);

return;
}
@@ -600,13 +604,17 @@ DistributedRectilinearMeshGenerator::getNeighbors<Hex8>(const dof_id_type nx,

if (corner)
{
// We collect one layer of point neighbors
// We add one layer of point neighbors by default. In addition,
// the elements on the opposite side of the current boundary are included
// in case periodic boundary conditions are requested. The overhead is negligible
// since every element ends up with roughly the same number of neighbors
unsigned int nnb = 0;
for (unsigned int ii = 0; ii <= 2; ii++)
for (unsigned int jj = 0; jj <= 2; jj++)
for (unsigned int kk = 0; kk <= 2; kk++)
if ((i + ii >= 1) && (i + ii <= nx) && (j + jj >= 1) && (j + jj <= ny) && (k + kk >= 1) &&
(k + kk <= nz))
neighbors[nnb++] = elemId<Hex8>(nx, ny, i + ii - 1, j + jj - 1, k + kk - 1);
neighbors[nnb++] = elemId<Hex8>(
nx, ny, (i + ii - 1 + nx) % nx, (j + jj - 1 + ny) % ny, (k + kk - 1 + nz) % nz);

return;
}
@@ -1015,6 +1023,10 @@ DistributedRectilinearMeshGenerator::buildCube(UnstructuredMesh & mesh,
bool old_allow_renumbering = mesh.allow_renumbering();
mesh.allow_renumbering(false);
mesh.allow_find_neighbors(false);
// We do not want to remove anything in case periodic boundary conditions
// are required later. We stop libMesh from deleting the valuable remote
// elements paired with the local elements
mesh.allow_remote_element_removal(false);
mesh.prepare_for_use();

// But we'll want to at least find neighbors after any future mesh changes!
@@ -1030,6 +1042,10 @@
{
// DistributedRectilinearMeshGenerator always generates a distributed mesh
_mesh->setParallelType(MooseMesh::ParallelType::DISTRIBUTED);
// We will set up boundaries accordingly. We do not want to call
// ghostGhostedBoundaries, in which allgather_packed_range is not scalable:
// it gathers all boundaries onto every single processor
_mesh->needGhostGhostedBoundaries(false);
auto mesh = _mesh->buildMeshBaseObject(MooseMesh::ParallelType::DISTRIBUTED);

MooseEnum elem_type_enum = getParam<MooseEnum>("elem_type");
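A standalone sketch (not part of the commit) of the wraparound arithmetic used in the corner branches above: adding nx (or ny, nz) before the modulo keeps the unsigned expression correct at the minus-side boundary, so element 0 picks up element nx - 1 as its periodic neighbor.

#include <cassert>

int main()
{
  using dof_id_type = unsigned int; // stand-in for libMesh's dof_id_type
  const dof_id_type nx = 10;

  // Away from the boundary the modulo is a no-op
  dof_id_type i = 4;
  assert((i - 1 + nx) % nx == 3);
  assert((i + 1 + nx) % nx == 5);

  // At the boundaries the indices wrap around periodically
  i = 0;
  assert((i - 1 + nx) % nx == nx - 1); // left neighbor of element 0 is element 9
  i = nx - 1;
  assert((i + 1 + nx) % nx == 0); // right neighbor of element 9 is element 0
  return 0;
}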
32 changes: 32 additions & 0 deletions test/include/userobjects/CheckGhostedBoundaries.h
@@ -0,0 +1,32 @@
//* This file is part of the MOOSE framework
//* https://www.mooseframework.org
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#pragma once

#include "GeneralUserObject.h"

/**
* A naive way to check if all boundaries are gathered to every single processor.
*/
class CheckGhostedBoundaries : public GeneralUserObject
{
public:
static InputParameters validParams();

CheckGhostedBoundaries(const InputParameters & params);

virtual void initialSetup(){};

virtual void initialize(){};
virtual void execute();
virtual void finalize(){};

private:
dof_id_type _total_num_bdry_sides;
};
37 changes: 37 additions & 0 deletions test/src/userobjects/CheckGhostedBoundaries.C
@@ -0,0 +1,37 @@
//* This file is part of the MOOSE framework
//* https://www.mooseframework.org
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "CheckGhostedBoundaries.h"

registerMooseObject("MooseTestApp", CheckGhostedBoundaries);

InputParameters
CheckGhostedBoundaries::validParams()
{
InputParameters params = GeneralUserObject::validParams();
params.addRequiredParam<dof_id_type>("total_num_bdry_sides", "Total number of boundary sides");
return params;
}

CheckGhostedBoundaries::CheckGhostedBoundaries(const InputParameters & params)
: GeneralUserObject(params), _total_num_bdry_sides(getParam<dof_id_type>("total_num_bdry_sides"))
{
}

void
CheckGhostedBoundaries::execute()
{
dof_id_type nelems = _fe_problem.mesh().getMesh().get_boundary_info().build_side_list().size();

// If the total number of boundary sides is the same as the number of local boundary sides,
// we conclude that an all-gather happened. That is not necessarily true, but it works fine
// for setting up tests
if (_total_num_bdry_sides == nelems)
mooseError("All boundaries are ghosted to every single processor, and it is not scalable.");
}
