// MFEM Example 6 - Parallel Version
// Compile with: make ex6p
// Sample runs: mpirun -np 4 ex6p -m ../data/square-disc.mesh -o 1
// mpirun -np 4 ex6p -m ../data/square-disc.mesh -o 2
// mpirun -np 4 ex6p -m ../data/square-disc-nurbs.mesh -o 2
// mpirun -np 4 ex6p -m ../data/star.mesh -o 3
// mpirun -np 4 ex6p -m ../data/escher.mesh -o 2
// mpirun -np 4 ex6p -m ../data/fichera.mesh -o 2
// mpirun -np 4 ex6p -m ../data/disc-nurbs.mesh -o 2
// mpirun -np 4 ex6p -m ../data/ball-nurbs.mesh
// mpirun -np 4 ex6p -m ../data/pipe-nurbs.mesh
// mpirun -np 4 ex6p -m ../data/star-surf.mesh -o 2
// mpirun -np 4 ex6p -m ../data/square-disc-surf.mesh -o 2
// mpirun -np 4 ex6p -m ../data/amr-quad.mesh
// Description: This is a version of Example 1 with a simple adaptive mesh
// refinement loop. The problem being solved is again the Laplace
// equation -Delta u = 1 with homogeneous Dirichlet boundary
// conditions. The problem is solved on a sequence of meshes which
// are locally refined in a conforming (triangles, tetrahedrons)
// or non-conforming (quadrilateral, hexahedrons) manner according
// to a simple ZZ error estimator.
// The example demonstrates MFEM's capability to work with both
// conforming and nonconforming refinements, in 2D and 3D, on
// linear, curved and surface meshes. Interpolation of functions
// from coarse to fine meshes, as well as persistent GLVis
// visualization are also illustrated.
// We recommend viewing Example 1 before viewing this example.
#include "mfem.hpp"
#include <fstream>
#include <iostream>
using namespace std;
using namespace mfem;
int main(int argc, char *argv[])
// 1. Initialize MPI.
int num_procs, myid;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
// 2. Parse command-line options.
const char *mesh_file = "../data/star.mesh";
int order = 1;
bool visualization = true;
OptionsParser args(argc, argv);
args.AddOption(&mesh_file, "-m", "--mesh",
"Mesh file to use.");
args.AddOption(&order, "-o", "--order",
"Finite element order (polynomial degree).");
args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
"Enable or disable GLVis visualization.");
if (!args.Good())
if (myid == 0)
return 1;
if (myid == 0)
// 3. Read the (serial) mesh from the given mesh file on all processors. We
// can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
// and volume meshes with the same code.
Mesh *mesh = new Mesh(mesh_file, 1, 1);
int dim = mesh->Dimension();
int sdim = mesh->SpaceDimension();
// 4. Refine the serial mesh on all processors to increase the resolution.
// Also project a NURBS mesh to a piecewise-quadratic curved mesh. Make
// sure that the mesh is non-conforming.
if (mesh->NURBSext)
// 5. Define a parallel mesh by partitioning the serial mesh.
// Once the parallel mesh is defined, the serial mesh can be deleted.
ParMesh pmesh(MPI_COMM_WORLD, *mesh);
delete mesh;
MFEM_VERIFY(pmesh.bdr_attributes.Size() > 0,
"Boundary attributes required in the mesh.");
Array<int> ess_bdr(pmesh.bdr_attributes.Max());
ess_bdr = 1;
// 6. Define a finite element space on the mesh. The polynomial order is
// one (linear) by default, but this can be changed on the command line.
H1_FECollection fec(order, dim);
ParFiniteElementSpace fespace(&pmesh, &fec);
// 7. As in Example 1p, we set up bilinear and linear forms corresponding to
// the Laplace problem -\Delta u = 1. We don't assemble the discrete
// problem yet, this will be done in the main loop.
ParBilinearForm a(&fespace);
ParLinearForm b(&fespace);
ConstantCoefficient one(1.0);
BilinearFormIntegrator *integ = new DiffusionIntegrator(one);
b.AddDomainIntegrator(new DomainLFIntegrator(one));
// 8. The solution vector x and the associated finite element grid function
// will be maintained over the AMR iterations. We initialize it to zero.
ParGridFunction x(&fespace);
x = 0;
// 9. Connect to GLVis.
char vishost[] = "localhost";
int visport = 19916;
socketstream sout;
if (visualization)
{, visport);
if (!sout)
if (myid == 0)
cout << "Unable to connect to GLVis server at "
<< vishost << ':' << visport << endl;
cout << "GLVis visualization disabled.\n";
visualization = false;
// 10. Set up an error estimator. Here we use the Zienkiewicz-Zhu estimator
// with L2 projection in the smoothing step to better handle hanging
// nodes and parallel partitioning. We need to supply a space for the
// discontinuous flux (L2) and a space for the smoothed flux (H(div) is
// used here).
L2_FECollection flux_fec(order, dim);
ParFiniteElementSpace flux_fes(&pmesh, &flux_fec, sdim);
RT_FECollection smooth_flux_fec(order-1, dim);
ParFiniteElementSpace smooth_flux_fes(&pmesh, &smooth_flux_fec);
// Another possible option for the smoothed flux space:
// H1_FECollection smooth_flux_fec(order, dim);
// ParFiniteElementSpace smooth_flux_fes(&pmesh, &smooth_flux_fec, dim);
L2ZienkiewiczZhuEstimator estimator(*integ, x, flux_fes, smooth_flux_fes);
// 11. A refiner selects and refines elements based on a refinement strategy.
// The strategy here is to refine elements with errors larger than a
// fraction of the maximum element error. Other strategies are possible.
// The refiner will call the given error estimator.
ThresholdRefiner refiner(estimator);
// 12. The main AMR loop. In each iteration we solve the problem on the
// current mesh, visualize the solution, and refine the mesh.
const int max_dofs = 100000;
for (int it = 0; ; it++)
HYPRE_Int global_dofs = fespace.GlobalTrueVSize();
if (myid == 0)
cout << "\nAMR iteration " << it << endl;
cout << "Number of unknowns: " << global_dofs << endl;
// 13. Assemble the stiffness matrix and the right-hand side. Note that
// MFEM doesn't care at this point that the mesh is nonconforming
// and parallel. The FE space is considered 'cut' along hanging
// edges/faces, and also across processor boundaries.
// 14. Create the parallel linear system: eliminate boundary conditions,
// constrain hanging nodes and nodes across processor boundaries.
// The system will be solved for true (unconstrained/unique) DOFs only.
Array<int> ess_tdof_list;
fespace.GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
HypreParMatrix A;
Vector B, X;
const int copy_interior = 1;
a.FormLinearSystem(ess_tdof_list, x, b, A, X, B, copy_interior);
// 15. Define and apply a parallel PCG solver for AX=B with the BoomerAMG
// preconditioner from hypre.
HypreBoomerAMG amg;
CGSolver pcg(A.GetComm());
pcg.SetPrintLevel(3); // print the first and the last iterations only
pcg.Mult(B, X);
// 16. Extract the parallel grid function corresponding to the finite element
// approximation X. This is the local solution on each processor.
a.RecoverFEMSolution(X, b, x);
// 17. Send the solution by socket to a GLVis server.
if (visualization)
sout << "parallel " << num_procs << " " << myid << "\n";
sout << "solution\n" << pmesh << x << flush;
if (global_dofs > max_dofs)
if (myid == 0)
cout << "Reached the maximum number of dofs. Stop." << endl;
// 18. Call the refiner to modify the mesh. The refiner calls the error
// estimator to obtain element errors, then it selects elements to be
// refined and finally it modifies the mesh. The Stop() method can be
// used to determine if a stopping criterion was met.
if (refiner.Stop())
if (myid == 0)
cout << "Stopping criterion satisfied. Stop." << endl;
// 19. Update the finite element space (recalculate the number of DOFs,
// etc.) and create a grid function update matrix. Apply the matrix
// to any GridFunctions over the space. In this case, the update
// matrix is an interpolation matrix so the updated GridFunction will
// still represent the same function as before refinement.
// 20. Load balance the mesh, and update the space and solution. Currently
// available only for nonconforming meshes.
if (pmesh.Nonconforming())
// Update the space and the GridFunction. This time the update matrix
// redistributes the GridFunction among the processors.
// 21. Inform also the bilinear and linear forms that the space has
// changed.
return 0;