// MFEM Example 13 - Parallel Version
//
// Compile with: make ex13p
//
// Sample runs:  mpirun -np 4 ex13p -m ../data/star.mesh
//               mpirun -np 4 ex13p -m ../data/square-disc.mesh -o 2
//               mpirun -np 4 ex13p -m ../data/beam-tet.mesh
//               mpirun -np 4 ex13p -m ../data/beam-hex.mesh
//               mpirun -np 4 ex13p -m ../data/escher.mesh
//               mpirun -np 4 ex13p -m ../data/fichera.mesh
//               mpirun -np 4 ex13p -m ../data/fichera-q2.vtk
//               mpirun -np 4 ex13p -m ../data/fichera-q3.mesh
//               mpirun -np 4 ex13p -m ../data/square-disc-nurbs.mesh
//               mpirun -np 4 ex13p -m ../data/beam-hex-nurbs.mesh
//               mpirun -np 4 ex13p -m ../data/amr-quad.mesh -o 2
//               mpirun -np 4 ex13p -m ../data/amr-hex.mesh
//               mpirun -np 4 ex13p -m ../data/mobius-strip.mesh -n 8 -o 2
//               mpirun -np 4 ex13p -m ../data/klein-bottle.mesh -n 10 -o 2
//
// Description:  This example code solves the Maxwell (electromagnetic)
//               eigenvalue problem curl curl E = lambda E with homogeneous
//               Dirichlet boundary conditions E x n = 0.
//
//               We compute a number of the lowest nonzero eigenmodes by
//               discretizing the curl curl operator using a Nedelec FE space
//               of the specified order in 2D or 3D.
//
//               The example highlights the use of the AME subspace eigenvalue
//               solver from HYPRE, which uses LOBPCG and AMS internally.
//               Reusing a single GLVis visualization window for multiple
//               eigenfunctions is also illustrated.
//
//               We recommend viewing examples 3 and 11 before viewing this
//               example.
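//
//               In weak form, the problem solved here is: find lambda and a
//               nonzero field E with E x n = 0 on the boundary such that
//
//                  (curl E, curl F) = lambda (E, F)   for all test fields F,
//
//               which after discretization in the Nedelec space becomes the
//               generalized matrix eigenvalue problem A x = lambda M x
//               assembled in steps 7-8 below.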
#include "mfem.hpp"
#include <fstream>
#include <iostream>
using namespace std;
using namespace mfem;
int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../data/beam-tet.mesh";
   int ser_ref_levels = 2;
   int par_ref_levels = 1;
   int order = 1;
   int nev = 5;
   bool visualization = true;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&ser_ref_levels, "-rs", "--refine-serial",
                  "Number of times to refine the mesh uniformly in serial.");
   args.AddOption(&par_ref_levels, "-rp", "--refine-parallel",
                  "Number of times to refine the mesh uniformly in parallel.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&nev, "-n", "--num-eigs",
                  "Number of desired eigenmodes.");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();

   // 4. Refine the serial mesh on all processors to increase the resolution.
   //    In this example we do 'ser_ref_levels' of uniform refinement (2 by
   //    default, or specified on the command line with -rs).
   for (int lev = 0; lev < ser_ref_levels; lev++)
   {
      mesh->UniformRefinement();
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution (1 time by
   //    default, or specified on the command line with -rp). Once the parallel
   //    mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   for (int lev = 0; lev < par_ref_levels; lev++)
   {
      pmesh->UniformRefinement();
   }

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use the Nedelec finite elements of the specified order.
   FiniteElementCollection *fec = new ND_FECollection(order, dim);
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   HYPRE_Int size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of unknowns: " << size << endl;
   }

   // 7. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
   //    element space. The first corresponds to the curl curl, while the
   //    second is a simple mass matrix needed on the right hand side of the
   //    generalized eigenvalue problem below. The boundary conditions are
   //    implemented by marking all the boundary attributes from the mesh as
   //    essential. The corresponding degrees of freedom are eliminated with
   //    special values on the diagonal to shift the Dirichlet eigenvalues out
   //    of the computational range. After serial and parallel assembly we
   //    extract the corresponding parallel matrices A and M.
   ConstantCoefficient one(1.0);
   Array<int> ess_bdr;
   if (pmesh->bdr_attributes.Size())
   {
      ess_bdr.SetSize(pmesh->bdr_attributes.Max());
      ess_bdr = 1;
   }

   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new CurlCurlIntegrator(one));
   if (pmesh->bdr_attributes.Size() == 0)
   {
      // Add a mass term if the mesh has no boundary, e.g. periodic mesh or
      // closed surface.
      a->AddDomainIntegrator(new VectorFEMassIntegrator(one));
   }
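   // With this extra term the assembled problem becomes
   // (A + M) x = (lambda + 1) M x, so on a boundaryless mesh the reported
   // eigenvalues are the curl-curl eigenvalues shifted up by one.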
   a->Assemble();
   a->EliminateEssentialBCDiag(ess_bdr, 1.0);
   a->Finalize();

   ParBilinearForm *m = new ParBilinearForm(fespace);
   m->AddDomainIntegrator(new VectorFEMassIntegrator(one));
   m->Assemble();
   // Shift the eigenvalues corresponding to the eliminated dofs to a large
   // value.
   m->EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min());
   m->Finalize();
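
   // Together with the unit diagonal set in A above, each eliminated dof i
   // satisfies 1.0 * x_i = lambda * numeric_limits<double>::min() * x_i, i.e.
   // it becomes a spurious eigenpair with eigenvalue
   // 1.0/numeric_limits<double>::min() ~ 4.5e+307, far outside the range of
   // the physical eigenvalues computed below.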

   HypreParMatrix *A = a->ParallelAssemble();
   HypreParMatrix *M = m->ParallelAssemble();

   delete a;
   delete m;

   // 8. Define and configure the AME eigensolver and the AMS preconditioner
   //    for A to be used within the solver. Set the matrices which define the
   //    generalized eigenproblem A x = lambda M x.
   HypreAMS *ams = new HypreAMS(*A, fespace);
   ams->SetPrintLevel(0);
   ams->SetSingularProblem();
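
   // The pure curl-curl operator is singular: discrete gradients lie in its
   // nullspace. SetSingularProblem() informs the AMS setup that the problem
   // has a zero mass term, so this nullspace is handled correctly.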

   HypreAME *ame = new HypreAME(MPI_COMM_WORLD);
   ame->SetNumModes(nev);
   ame->SetPreconditioner(*ams);
   ame->SetMaxIter(100);
   ame->SetTol(1e-8);
   ame->SetPrintLevel(1);
   ame->SetMassMatrix(*M);
   ame->SetOperator(*A);
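
   // The eigensolver is now fully configured for A x = lambda M x; as noted
   // in the description above, AME drives LOBPCG iterations with AMS as the
   // preconditioner.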

   // 9. Compute the eigenmodes and extract the array of eigenvalues. Define a
   //    parallel grid function to represent each of the eigenmodes returned
   //    by the solver.
   Array<double> eigenvalues;
   ame->Solve();
   ame->GetEigenvalues(eigenvalues);
   ParGridFunction x(fespace);

   // 10. Save the refined mesh and the modes in parallel. This output can be
   //     viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
   {
      ostringstream mesh_name, mode_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      for (int i=0; i<nev; i++)
      {
         // convert eigenvector from HypreParVector to ParGridFunction
         x = ame->GetEigenvector(i);

         mode_name << "mode_" << setfill('0') << setw(2) << i << "."
                   << setfill('0') << setw(6) << myid;

         ofstream mode_ofs(mode_name.str().c_str());
         mode_ofs.precision(8);
         x.Save(mode_ofs);
         mode_name.str("");
      }
   }
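
   // For example, with 4 MPI ranks the first mode saved above can be viewed
   // with: "glvis -np 4 -m mesh -g mode_00".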

   // 11. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int visport = 19916;
      socketstream mode_sock(vishost, visport);
      mode_sock.precision(8);

      for (int i=0; i<nev; i++)
      {
         if (myid == 0)
         {
            cout << "Eigenmode " << i+1 << '/' << nev
                 << ", Lambda = " << eigenvalues[i] << endl;
         }

         // convert eigenvector from HypreParVector to ParGridFunction
         x = ame->GetEigenvector(i);

         mode_sock << "parallel " << num_procs << " " << myid << "\n"
                   << "solution\n" << *pmesh << x << flush
                   << "window_title 'Eigenmode " << i+1 << '/' << nev
                   << ", Lambda = " << eigenvalues[i] << "'" << endl;

         char c;
         if (myid == 0)
         {
            cout << "press (q)uit or (c)ontinue --> " << flush;
            cin >> c;
         }
         MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

         if (c != 'c')
         {
            break;
         }
      }
      mode_sock.close();
   }

   // 12. Free the used memory.
   delete ame;
   delete ams;
   delete M;
   delete A;
   delete fespace;
   delete fec;
   delete pmesh;

   MPI_Finalize();

   return 0;
}