diff --git a/Crystal_Growth_Phase_Field_Model/solve.cpp b/Crystal_Growth_Phase_Field_Model/solve.cpp
index 21d26f48..05e4b1c1 100644
--- a/Crystal_Growth_Phase_Field_Model/solve.cpp
+++ b/Crystal_Growth_Phase_Field_Model/solve.cpp
@@ -13,7 +13,7 @@
 void PhaseFieldSolver::solve(){
     //Using a direct parallel solver
     SolverControl cn;
-    PETScWrappers::SparseDirectMUMPS A_direct(cn, mpi_communicator);
+    PETScWrappers::SparseDirectMUMPS A_direct(cn);
     A_direct.solve(jacobian_matrix, solution_update, system_rhs);
     //Updating the solution by adding the delta solution
     conv_solution.add(1, solution_update);
diff --git a/time_dependent_navier_stokes/time_dependent_navier_stokes.cc b/time_dependent_navier_stokes/time_dependent_navier_stokes.cc
index b2e3f138..7552777d 100644
--- a/time_dependent_navier_stokes/time_dependent_navier_stokes.cc
+++ b/time_dependent_navier_stokes/time_dependent_navier_stokes.cc
@@ -493,8 +493,7 @@ namespace fluid
       TimerOutput::Scope timer_section(timer, "CG for Mp");
       SolverControl mp_control(src.block(1).size(),
                                1e-6 * src.block(1).l2_norm());
-      PETScWrappers::SolverCG cg_mp(mp_control,
-                                    mass_schur->get_mpi_communicator());
+      PETScWrappers::SolverCG cg_mp(mp_control);
       // $-(\nu + \gamma)M_p^{-1}v_1$
       PETScWrappers::PreconditionBlockJacobi Mp_preconditioner;
       Mp_preconditioner.initialize(mass_matrix->block(1, 1));
@@ -507,8 +506,7 @@ namespace fluid
       TimerOutput::Scope timer_section(timer, "CG for Sm");
       SolverControl sm_control(src.block(1).size(),
                                1e-6 * src.block(1).l2_norm());
-      PETScWrappers::SolverCG cg_sm(sm_control,
-                                    mass_schur->get_mpi_communicator());
+      PETScWrappers::SolverCG cg_sm(sm_control);
       // PreconditionBlockJacobi works find on Sm if we do not refine the mesh.
       // Because after refine_mesh is called, zero entries will be created on
       // the diagonal (not sure why), which prevents PreconditionBlockJacobi
@@ -531,8 +529,7 @@ namespace fluid
       TimerOutput::Scope timer_section(timer, "CG for A");
       SolverControl a_control(src.block(0).size(),
                               1e-6 * src.block(0).l2_norm());
-      PETScWrappers::SolverCG cg_a(a_control,
-                                   mass_schur->get_mpi_communicator());
+      PETScWrappers::SolverCG cg_a(a_control);
       // We do not use any preconditioner for this block, which is of course
       // slow,
       // only because the performance of the only two preconditioners available
diff --git a/two_phase_flow/LevelSetSolver.cc b/two_phase_flow/LevelSetSolver.cc
index fd1f078e..28527278 100644
--- a/two_phase_flow/LevelSetSolver.cc
+++ b/two_phase_flow/LevelSetSolver.cc
@@ -1524,7 +1524,7 @@ void LevelSetSolver::solve(const AffineConstraints &constraints,
 {
   // all vectors are NON-GHOSTED
   SolverControl solver_control (dof_handler_LS.n_dofs(), solver_tolerance);
-  PETScWrappers::SolverCG solver(solver_control, mpi_communicator);
+  PETScWrappers::SolverCG solver(solver_control);
   constraints.distribute (completely_distributed_solution);
   solver.solve (Matrix, completely_distributed_solution, rhs, *preconditioner);
   constraints.distribute (completely_distributed_solution);
diff --git a/two_phase_flow/NavierStokesSolver.cc b/two_phase_flow/NavierStokesSolver.cc
index d64deea6..9b3e6ec1 100644
--- a/two_phase_flow/NavierStokesSolver.cc
+++ b/two_phase_flow/NavierStokesSolver.cc
@@ -1014,10 +1014,10 @@ void NavierStokesSolver::solve_U(const AffineConstraints &constrain
                                       const PETScWrappers::MPI::Vector &rhs)
 {
   SolverControl solver_control(dof_handler_U.n_dofs(),1e-6);
-  //PETScWrappers::SolverCG solver(solver_control, mpi_communicator);
-  //PETScWrappers::SolverGMRES solver(solver_control, mpi_communicator);
-  //PETScWrappers::SolverChebychev solver(solver_control, mpi_communicator);
-  PETScWrappers::SolverBicgstab solver(solver_control,mpi_communicator);
+  //PETScWrappers::SolverCG solver(solver_control);
+  //PETScWrappers::SolverGMRES solver(solver_control);
+  //PETScWrappers::SolverChebychev solver(solver_control);
+  PETScWrappers::SolverBicgstab solver(solver_control);
   constraints.distribute(completely_distributed_solution);
   solver.solve(Matrix,completely_distributed_solution,rhs,*preconditioner);
   constraints.distribute(completely_distributed_solution);
@@ -1035,8 +1035,8 @@ void NavierStokesSolver::solve_P(const AffineConstraints &constrain
                                       const PETScWrappers::MPI::Vector &rhs)
 {
   SolverControl solver_control(dof_handler_P.n_dofs(),1e-6);
-  PETScWrappers::SolverCG solver(solver_control,mpi_communicator);
-  //PETScWrappers::SolverGMRES solver(solver_control, mpi_communicator);
+  PETScWrappers::SolverCG solver(solver_control);
+  //PETScWrappers::SolverGMRES solver(solver_control);
   constraints.distribute(completely_distributed_solution);
   solver.solve(Matrix,completely_distributed_solution,rhs,*preconditioner);
   constraints.distribute(completely_distributed_solution);
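Note: every hunk applies the same change; the MPI communicator argument is dropped from the PETScWrappers solver constructors, and the solver instead uses the communicator of the objects it is given in solve(). The following is a minimal, self-contained sketch of the new constructor usage, not taken from the patch: the function solve_example and its parameters are made up for illustration, and it assumes a deal.II build configured with PETSc in which the communicator-free constructors are available.

#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver_control.h>

// Hypothetical helper illustrating the communicator-free construction pattern.
void solve_example(const dealii::PETScWrappers::MPI::SparseMatrix &matrix,
                   dealii::PETScWrappers::MPI::Vector             &solution,
                   const dealii::PETScWrappers::MPI::Vector       &rhs)
{
  using namespace dealii;

  // Stop after at most as many iterations as unknowns, or at a
  // relative residual of 1e-6 with respect to the right-hand side.
  SolverControl control(rhs.size(), 1e-6 * rhs.l2_norm());

  // Old form: PETScWrappers::SolverCG cg(control, mpi_communicator);
  // New form: no communicator argument; the solver runs on the
  // communicator of the matrix passed to solve().
  PETScWrappers::SolverCG cg(control);

  PETScWrappers::PreconditionBlockJacobi preconditioner;
  preconditioner.initialize(matrix);

  cg.solve(matrix, solution, rhs, preconditioner);
}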