-
Notifications
You must be signed in to change notification settings - Fork 707
/
step-70.cc
1991 lines (1721 loc) · 88.5 KB
/
step-70.cc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/* ---------------------------------------------------------------------
*
* Copyright (C) 2020 - 2021 by the deal.II authors
*
* This file is part of the deal.II library.
*
* The deal.II library is free software; you can use it, redistribute
* it, and/or modify it under the terms of the GNU Lesser General
* Public License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* The full text of the license can be found in the file LICENSE.md at
* the top level directory of deal.II.
*
* ---------------------------------------------------------------------
*
* Authors: Luca Heltai, Bruno Blais, Rene Gassmoeller, 2020
*/
// @sect3{Include files}
// Most of these have been introduced elsewhere, we'll comment only on the new
// ones. The switches close to the top that allow selecting between PETSc
// and Trilinos linear algebra capabilities are similar to the ones in
// step-40 and step-50.
#include <deal.II/base/function.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/timer.h>
#include <deal.II/lac/block_linear_operator.h>
#include <deal.II/lac/generic_linear_algebra.h>
#include <deal.II/lac/linear_operator.h>
#include <deal.II/lac/linear_operator_tools.h>
#define FORCE_USE_OF_TRILINOS
// The LA namespace selects, at configuration time, which parallel linear
// algebra backend the program uses. PETSc is chosen only when deal.II was
// built with (real-valued) PETSc support AND Trilinos is not forced via the
// FORCE_USE_OF_TRILINOS macro defined above; otherwise Trilinos is used.
// The USE_PETSC_LA macro lets later code branch on the chosen backend.
namespace LA
{
#if defined(DEAL_II_WITH_PETSC) && !defined(DEAL_II_PETSC_WITH_COMPLEX) && \
  !(defined(DEAL_II_WITH_TRILINOS) && defined(FORCE_USE_OF_TRILINOS))
  using namespace dealii::LinearAlgebraPETSc;
#  define USE_PETSC_LA
#elif defined(DEAL_II_WITH_TRILINOS)
  using namespace dealii::LinearAlgebraTrilinos;
#else
  // Neither backend available: the program cannot run at all.
#  error DEAL_II_WITH_PETSC or DEAL_II_WITH_TRILINOS required
#endif
} // namespace LA
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/parameter_acceptor.h>
#include <deal.II/base/parsed_function.h>
#include <deal.II/base/utilities.h>
#include <deal.II/distributed/grid_refinement.h>
#include <deal.II/distributed/solution_transfer.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_nothing.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/mapping_fe_field.h>
#include <deal.II/fe/mapping_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_in.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/manifold_lib.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_gmres.h>
#include <deal.II/lac/solver_minres.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/error_estimator.h>
#include <deal.II/numerics/vector_tools.h>
// These are the only new include files with regard to step-60. In this
// tutorial, the non-matching coupling between the solid and the fluid is
// computed using an intermediate data structure that keeps track of how the
// locations of quadrature points of the solid evolve within the fluid mesh.
// This data structure needs to keep track of the position of the quadrature
// points on each cell describing the solid domain, of the quadrature weights,
// and possibly of the normal vector to each point, if the solid domain is of
// co-dimension one.
//
// Deal.II offers these facilities in the Particles namespace, through the
// ParticleHandler class. ParticleHandler is a class that allows you to manage
// a collection of particles (objects of type Particles::Particle), representing
// a collection of points with some attached properties (e.g., an id) floating
// on a parallel::distributed::Triangulation. The methods and classes in the
// namespace Particles allows one to easily implement Particle-In-Cell methods
// and particle tracing on distributed triangulations.
//
// We "abuse" this data structure to store information about the location of
// solid quadrature points embedded in the surrounding fluid grid, including
// integration weights, and possibly surface normals. The reason why we use this
// additional data structure is related to the fact that the solid and the fluid
// grids might be non-overlapping, and if we were using two separate
// triangulation objects, they would be distributed independently among parallel
// processes.
//
// In order to couple the two problems, we rely on the ParticleHandler class,
// storing in each particle the position of a solid quadrature point (which is
// in general not aligned to any of the fluid quadrature points), its weight,
// and any other information that may be required to couple the two problems.
// These locations are then propagated along with the (prescribed) velocity
// of the solid impeller.
//
// Ownership of the solid quadrature points is initially inherited from the MPI
// partitioning on the solid mesh itself. The Particles so generated are later
// distributed to the fluid mesh using the methods of the ParticleHandler class.
// This allows transparent exchange of information between MPI processes about
// the overlapping pattern between fluid cells and solid quadrature points.
#include <deal.II/particles/data_out.h>
#include <deal.II/particles/generators.h>
#include <deal.II/particles/particle_handler.h>
#include <deal.II/particles/utilities.h>
// When generating the grids, we allow reading it from a file, and if deal.II
// has been built with OpenCASCADE support, we also allow reading CAD files and
// use them as manifold descriptors for the grid (see step-54 for a detailed
// description of the various Manifold descriptors that are available in the
// OpenCASCADE namespace)
#include <deal.II/opencascade/manifold_lib.h>
#include <deal.II/opencascade/utilities.h>
#ifdef DEAL_II_WITH_OPENCASCADE
# include <TopoDS.hxx>
#endif
#include <cmath>
#include <fstream>
#include <iostream>
#include <memory>
namespace Step70
{
using namespace dealii;
// @sect3{Run-time parameter handling}
// Similarly to what we have done in step-60, we set up a class that holds
// all the parameters of our problem and derive it from the ParameterAcceptor
// class to simplify the management and creation of parameter files.
//
// The ParameterAcceptor paradigm requires all parameters to be writable by
// the ParameterAcceptor methods. In order to avoid bugs that would be very
// difficult to track down (such as writing things like `time = 0` instead of
// `time == 0`), we declare all the parameters in an external class, which is
// initialized before the actual `StokesImmersedProblem` class, and pass it to
// the main class as a `const` reference.
//
// The constructor of the class is responsible for the connection between the
// members of this class and the corresponding entries in the
// ParameterHandler. Thanks to the use of the
// ParameterHandler::add_parameter() method, this connection is trivial, but
// requires all members of this class to be writeable.
template <int dim, int spacedim = dim>
class StokesImmersedProblemParameters : public ParameterAcceptor
{
public:
  StokesImmersedProblemParameters();

  // Since this class is passed as a `const` reference to the
  // StokesImmersedProblem class, we still need a way to set the current
  // simulation time on the Function objects it owns. The two function
  // members below are therefore declared `mutable`, and this helper
  // forwards the time to both of them.
  //
  // The time is taken by value: a `double` is cheap to copy and passing it
  // by const reference buys nothing (call sites are unaffected).
  void set_time(const double time) const
  {
    rhs.set_time(time);
    angular_velocity.set_time(time);
  }

  // The remainder of the class consists of member variables describing the
  // simulation and its discretization: where output lands, the spatial and
  // temporal discretization (the default is the $Q_2\times Q_1$ Taylor-Hood
  // discretization, i.e. polynomial degree 2 for the velocity), and how many
  // time steps elapse between graphical outputs:
  std::string output_directory = ".";

  unsigned int velocity_degree = 2;

  unsigned int number_of_time_steps = 501;
  double final_time = 1.0;
  unsigned int output_frequency = 1;

  // Each grid may be refined independently. In this tutorial no physics is
  // resolved on the solid grid and its velocity is given as a datum, but it
  // is relatively straightforward to add an elasticity model and turn this
  // into a fully fledged FSI solver.
  unsigned int initial_fluid_refinement = 5;
  unsigned int initial_solid_refinement = 5;
  unsigned int particle_insertion_refinement = 3;

  // A rough description of the fluid domain is built with
  // extract_rtree_level() applied to the tree of bounding boxes of the
  // locally owned fluid cells. A higher level extracts more bounding boxes
  // (a more accurate domain description) but also raises the communication
  // cost, since the collection is gathered by all processes.
  unsigned int fluid_rtree_extraction_level = 1;

  // The only two numerical parameters in the equations: the fluid viscosity
  // and the penalty term $\beta$ of the Nitsche formulation.
  double viscosity = 1.0;
  double penalty_term = 100;

  // By default we create a hyper_cube without colorization and apply
  // homogeneous Dirichlet boundary conditions; this set stores the boundary
  // ids those conditions are applied to:
  std::list<types::boundary_id> homogeneous_dirichlet_ids{0};

  // Grids are created from a parameter file via
  // GridGenerator::generate_from_name_and_arguments(), which takes the name
  // of a GridGenerator function and its arguments encoded as a single
  // string (a tuple). The string encoding is documented in
  // Patterns::Tools::Convert: rank-1 uniform elements (std::vector<double>,
  // Point<dim>, std::set<int>, ...) are comma separated, each additional
  // rank adds a semicolon, and non-uniform entries (tuples/pairs) are colon
  // separated. For example, `5: 0.1, 0.2` can parse into a
  // `std::pair<int, Point<2>>`, and `0.0, 0.1; 0.1, 0.2` into a
  // `std::vector<Point<dim>>`.
  //
  // If the named GridGenerator function fails, the grid name is instead
  // interpreted as a vtk filename and the arguments as a map from
  // manifold_id to CAD files, from which OpenCASCADE Manifold objects are
  // generated.
  //
  // We do this for each generated grid: the fluid grid, the solid grid, and
  // the tracer-particle grid (tracers are also seeded from a triangulation).
  std::string name_of_fluid_grid       = "hyper_cube";
  std::string arguments_for_fluid_grid = "-1: 1: false";
  std::string name_of_solid_grid       = "hyper_rectangle";
  std::string arguments_for_solid_grid = spacedim == 2 ?
                                           "-.5, -.1: .5, .1: false" :
                                           "-.5, -.1, -.1: .5, .1, .1: false";
  std::string name_of_particle_grid = "hyper_ball";
  std::string arguments_for_particle_grid =
    spacedim == 2 ? "0.3, 0.3: 0.1: false" : "0.3, 0.3, 0.3 : 0.1: false";

  // Local refinement controls: the maximum refinement level bounds the
  // minimum fluid cell size (keeping it compatible with the solid grid),
  // while the minimum level is controlled to ensure sufficient accuracy in
  // the bulk of the flow. Refinement is driven by standard error estimators
  // on the fluid velocity field, using either the `fixed_number` or
  // `fixed_fraction` strategy (see
  // GridRefinement::refine_and_coarsen_fixed_number() and
  // GridRefinement::refine_and_coarsen_fixed_fraction()), and is performed
  // only every `refinement_frequency` time steps:
  int          max_level_refinement = 8;
  int          min_level_refinement = 5;
  std::string  refinement_strategy  = "fixed_fraction";
  double       coarsening_fraction  = 0.3;
  double       refinement_fraction  = 0.3;
  unsigned int max_cells            = 20000;
  int          refinement_frequency = 5;

  // Finally, two function objects control the source term of the Stokes
  // flow and the angular velocity of the solid body. In a more realistic
  // simulation the solid velocity or deformation would come from an
  // auxiliary problem on the solid domain; here we simply impose a rotation
  // about the z-axis, specified in the parameter file. Both members are
  // `mutable` so that set_time() above can work on a `const` object.
  mutable ParameterAcceptorProxy<Functions::ParsedFunction<spacedim>> rhs;
  mutable ParameterAcceptorProxy<Functions::ParsedFunction<spacedim>>
    angular_velocity;
};
// There remains the task of declaring what run-time parameters we can accept
// in input files. We split the parameters in various categories, by putting
// them in different sections of the ParameterHandler class. We begin by
// declaring all the global parameters used by StokesImmersedProblem
// in the global scope:
// The constructor connects every member variable of the parameter class to
// a named entry of the ParameterHandler. All entries live in the
// "Stokes Immersed Problem/" section; the two ParsedFunction proxies get
// their own subsections ("Right hand side" and "Angular velocity").
template <int dim, int spacedim>
StokesImmersedProblemParameters<dim,
                                spacedim>::StokesImmersedProblemParameters()
  : ParameterAcceptor("Stokes Immersed Problem/")
  , rhs("Right hand side", spacedim + 1)
  , angular_velocity("Angular velocity")
{
  // Global scalar parameters. The explicit Patterns::Integer(1) restricts
  // the velocity degree to values >= 1 (Taylor-Hood needs at least Q2/Q1).
  add_parameter(
    "Velocity degree", velocity_degree, "", this->prm, Patterns::Integer(1));
  add_parameter("Number of time steps", number_of_time_steps);
  add_parameter("Output frequency", output_frequency);
  add_parameter("Output directory", output_directory);
  add_parameter("Final time", final_time);
  add_parameter("Viscosity", viscosity);
  add_parameter("Nitsche penalty term", penalty_term);
  add_parameter("Initial fluid refinement",
                initial_fluid_refinement,
                "Initial mesh refinement used for the fluid domain Omega");
  add_parameter("Initial solid refinement",
                initial_solid_refinement,
                "Initial mesh refinement used for the solid domain Gamma");
  add_parameter("Fluid bounding boxes extraction level",
                fluid_rtree_extraction_level,
                "Extraction level of the rtree used to construct global "
                "bounding boxes");
  add_parameter(
    "Particle insertion refinement",
    particle_insertion_refinement,
    "Refinement of the volumetric mesh used to insert the particles");
  add_parameter(
    "Homogeneous Dirichlet boundary ids",
    homogeneous_dirichlet_ids,
    "Boundary Ids over which homogeneous Dirichlet boundary conditions are applied");
  // Next section is dedicated to the parameters used to create the
  // various grids. We will need three different triangulations: `Fluid
  // grid` is used to define the fluid domain, `Solid grid` defines the
  // solid domain, and `Particle grid` is used to distribute some tracer
  // particles, that are advected with the velocity and only used as
  // passive tracers.
  enter_subsection("Grid generation");
  {
    add_parameter("Fluid grid generator", name_of_fluid_grid);
    add_parameter("Fluid grid generator arguments", arguments_for_fluid_grid);
    add_parameter("Solid grid generator", name_of_solid_grid);
    add_parameter("Solid grid generator arguments", arguments_for_solid_grid);
    add_parameter("Particle grid generator", name_of_particle_grid);
    add_parameter("Particle grid generator arguments",
                  arguments_for_particle_grid);
  }
  leave_subsection();
  // Parameters controlling when and how the fluid mesh is adapted.
  enter_subsection("Refinement and remeshing");
  {
    add_parameter("Refinement step frequency", refinement_frequency);
    add_parameter("Refinement maximal level", max_level_refinement);
    add_parameter("Refinement minimal level", min_level_refinement);
    add_parameter("Refinement strategy",
                  refinement_strategy,
                  "",
                  this->prm,
                  Patterns::Selection("fixed_fraction|fixed_number"));
    add_parameter("Refinement coarsening fraction", coarsening_fraction);
    add_parameter("Refinement fraction", refinement_fraction);
    add_parameter("Maximum number of cells", max_cells);
  }
  leave_subsection();
  // The final task is to correct the default dimension for the right hand
  // side function (spacedim velocity components plus pressure) and define a
  // meaningful default angular velocity instead of zero: one full rotation
  // in one direction for t < 0.5, then the reverse rotation. The `[&]`
  // captures of `this` are safe here because the callbacks are invoked by
  // ParameterAcceptor while this object is alive.
  rhs.declare_parameters_call_back.connect([&]() {
    Functions::ParsedFunction<spacedim>::declare_parameters(this->prm,
                                                            spacedim + 1);
  });
  angular_velocity.declare_parameters_call_back.connect([&]() {
    this->prm.set("Function expression",
                  "t < .500001 ? 6.283185 : -6.283185");
  });
}
// Once the angular velocity is provided as a Function object, we reconstruct
// the pointwise solid velocity through the following class which derives
// from the Function class. It provides the value of the velocity of
// the solid body at a given position by assuming that the body rotates
// around the origin (or the $z$ axis in 3d) with a given angular velocity.
// Once the angular velocity is provided as a Function object, this class
// reconstructs the pointwise solid velocity: the value of the velocity of
// the solid body at a given position, assuming the body rotates around the
// origin (the $z$ axis in 3d) with the given angular velocity.
template <int spacedim>
class SolidVelocity : public Function<spacedim>
{
public:
  static_assert(spacedim > 1,
                "Cannot instantiate SolidVelocity for spacedim == 1");

  // The velocity is a vector field with `spacedim` components, so we pass
  // `spacedim` to the Function base class, mirroring what SolidPosition
  // does below. Without this, n_components would default to 1, which is
  // inconsistent with the component-wise evaluation performed in value().
  SolidVelocity(const Functions::ParsedFunction<spacedim> &angular_velocity)
    : Function<spacedim>(spacedim)
    , angular_velocity(angular_velocity)
  {}

  virtual double value(const Point<spacedim> &p,
                       unsigned int component = 0) const override
  {
    Tensor<1, spacedim> velocity;
    // We assume that the angular velocity is directed along the z-axis, i.e.,
    // we model the actual angular velocity as if it was a two-dimensional
    // rotation, irrespective of the actual value of `spacedim`. (For
    // spacedim == 3 the z-component of the velocity stays zero.)
    const double omega = angular_velocity.value(p);
    velocity[0]        = -omega * p[1];
    velocity[1]        = omega * p[0];
    return velocity[component];
  }

private:
  // Non-owning reference; the parameter object must outlive this function.
  const Functions::ParsedFunction<spacedim> &angular_velocity;
};
// Similarly, we assume that the solid position can be computed explicitly at
// each time step, exploiting the knowledge of the angular velocity. We
// compute the exact position of the solid particle assuming that the solid is
// rotated by an amount equal to the time step multiplied by the angular
// velocity computed at the point `p`:
// Similarly, the solid position can be computed explicitly at each time
// step: the solid is rotated, about the z-axis, by an angle equal to the
// time step multiplied by the angular velocity evaluated at the point `p`.
template <int spacedim>
class SolidPosition : public Function<spacedim>
{
public:
  static_assert(spacedim > 1,
                "Cannot instantiate SolidPosition for spacedim == 1");

  SolidPosition(const Functions::ParsedFunction<spacedim> &angular_velocity,
                const double time_step)
    : Function<spacedim>(spacedim)
    , angular_velocity(angular_velocity)
    , time_step(time_step)
  {}

  virtual double value(const Point<spacedim> &p,
                       unsigned int component = 0) const override
  {
    // Angle swept during one time step, then a plain 2d rotation of the
    // x-y coordinates; any remaining coordinate (z in 3d) is unchanged.
    const double dtheta = angular_velocity.value(p) * time_step;
    const double c      = std::cos(dtheta);
    const double s      = std::sin(dtheta);

    Point<spacedim> rotated = p;
    rotated[0]              = c * p[0] - s * p[1];
    rotated[1]              = s * p[0] + c * p[1];
    return rotated[component];
  }

  // Update the step size used for the rotation (the angular velocity may be
  // time dependent, so the step can change during the simulation).
  void set_time_step(const double new_time_step)
  {
    time_step = new_time_step;
  }

private:
  // Non-owning reference; the parameter object must outlive this function.
  const Functions::ParsedFunction<spacedim> &angular_velocity;
  double                                     time_step;
};
// @sect3{The StokesImmersedProblem class declaration}
// We are now ready to introduce the main class of our tutorial program. As
// usual, other than the constructor, we leave a single public entry point:
// the `run()` method. Everything else is left `private`, and accessed through
// the run method itself.
template <int dim, int spacedim = dim>
class StokesImmersedProblem
{
public:
StokesImmersedProblem(
const StokesImmersedProblemParameters<dim, spacedim> &par);
void run();
// The next section contains the `private` members of the class.
// The first method is similar to what is present in previous example.
// However it not only takes care of generating the grid for the fluid, but
// also the grid for the solid. The second computes the largest time step
// that guarantees that each particle moves of at most one cell. This is
// important to ensure that the Particles::ParticleHandler can find which
// cell a particle ends up in, as it can only look from one cell to its
// immediate neighbors (because, in a parallel setting, every MPI process
// only knows about the cells it owns as well as their immediate neighbors).
private:
void make_grid();
double compute_time_step() const;
// The next two functions initialize the
// Particles::ParticleHandler objects used in this class. We have two such
// objects: One represents passive tracers, used to plot the trajectories
// of fluid particles, while the other represents material particles
// of the solid, which are placed at quadrature points of the solid grid.
void setup_tracer_particles();
void setup_solid_particles();
// The remainder of the set up is split in two parts: The first of the
// following two functions creates all objects that are needed once per
// simulation, whereas the other sets up all objects that need to be
// reinitialized at every refinement step.
void initial_setup();
void setup_dofs();
// The assembly routine is very similar to other Stokes assembly routines,
// with the exception of the Nitsche restriction part, which exploits one of
// the particle handlers to integrate on a non-matching part of the fluid
// domain, corresponding to the position of the solid. We split these two
// parts into two separate functions.
void assemble_stokes_system();
void assemble_nitsche_restriction();
// The remaining functions solve the linear system (which looks almost
// identical to the one in step-60) and then postprocess the solution: The
// refine_and_transfer() method is called only every `refinement_frequency`
// steps to adapt the mesh and also make sure that all the fields that were
// computed on the time step before refinement are transferred correctly to
// the new grid. This includes vector fields, as well as particle
// information. Similarly, we call the two output methods only every
// `output_frequency` steps.
void solve();
void refine_and_transfer();
void output_results(const unsigned int cycle, const double time) const;
void output_particles(const Particles::ParticleHandler<spacedim> &particles,
std::string fprefix,
const unsigned int iter,
const double time) const;
// Let us then move on to the member functions of the class. The first
// deals with run-time parameters that are read from a parameter file.
// As noted before, we make sure we cannot modify this object from within
// this class, by making it a `const` reference.
const StokesImmersedProblemParameters<dim, spacedim> &par;
// Then there is also the MPI communicator object that we will use to
// let processes send information across the network if the program runs
// in parallel, along with the `pcout` object and timer information
// that has also been employed by step-40, for example:
MPI_Comm mpi_communicator;
ConditionalOStream pcout;
mutable TimerOutput computing_timer;
// Next is one of the main novelties with regard to step-60. Here we
// assume that both the solid and the fluid are fully distributed
// triangulations. This allows the problem to scale to a very large number
// of degrees of freedom, at the cost of communicating all the overlapping
// regions between non matching triangulations. This is especially tricky,
// since we make no assumptions on the relative position or distribution of
// the various subdomains of the two triangulations. In particular, we
// assume that every process owns only a part of the `solid_tria`, and only
// a part of the `fluid_tria`, not necessarily in the same physical region,
// and not necessarily overlapping.
//
// We could in principle try to create the initial subdivisions in such a
// way that each process's subdomains overlap between the solid and the
// fluid regions. However, this overlap would be destroyed during the
// simulation, and we would have to redistribute the DoFs again and again.
// The approach we follow in this tutorial is more flexible, and not much
// more expensive. We make two all-to-all communications at the beginning of
// the simulation to exchange (approximate) information about
// the geometrical occupancy of each processor (done through a collection
// of bounding boxes).
//
// This information is used by the Particles::ParticleHandler class
// to exchange (using a some-to-some communication pattern) all particles,
// so that every process knows about the particles that live on the
// region occupied by the fluid subdomain that it owns.
//
// In order to couple the overlapping regions, we exploit the facilities
// implemented in the ParticleHandler class.
parallel::distributed::Triangulation<spacedim> fluid_tria;
parallel::distributed::Triangulation<dim, spacedim> solid_tria;
// Next come descriptions of the finite elements in use, along with
// appropriate quadrature formulas and the corresponding DoFHandler objects.
// For the current implementation, only `fluid_fe` is really necessary. For
// completeness, and to allow easy extension, we also keep the `solid_fe`
// around, which is however initialized to a FE_Nothing finite element
// space, i.e., one that has no degrees of freedom.
//
// We declare both finite element spaces as `std::unique_ptr` objects rather
// than regular member variables, to allow their generation after
// `StokesImmersedProblemParameters` has been initialized. In particular,
// they will be initialized in the `initial_setup()` method.
std::unique_ptr<FiniteElement<spacedim>> fluid_fe;
std::unique_ptr<FiniteElement<dim, spacedim>> solid_fe;
std::unique_ptr<Quadrature<spacedim>> fluid_quadrature_formula;
std::unique_ptr<Quadrature<dim>> solid_quadrature_formula;
DoFHandler<spacedim> fluid_dh;
DoFHandler<dim, spacedim> solid_dh;
// A mapping describing the configuration of the solid grid. MappingFEField
// computes the mapping from a finite element field rather than from the
// vertex positions alone, which allows the solid to be evaluated in a
// deformed/displaced configuration (presumably built on a displacement
// field of `solid_dh` -- see where it is initialized).
std::unique_ptr<MappingFEField<dim, spacedim>> solid_mapping;
// Similarly to how things are done in step-22, we use a block system to
// treat the Stokes part of the problem, and follow very closely what was
// done there.
//
// The `*_owned_dofs` vectors hold, block by block, the degrees of freedom
// owned by the current process, while the `*_relevant_dofs` vectors also
// include the ghost entries this process needs read access to.
std::vector<IndexSet> fluid_owned_dofs;
std::vector<IndexSet> solid_owned_dofs;
std::vector<IndexSet> fluid_relevant_dofs;
std::vector<IndexSet> solid_relevant_dofs;
// Using this partitioning of degrees of freedom, we can then define all of
// the objects necessary to describe the linear systems in question:
AffineConstraints<double> constraints;
LA::MPI::BlockSparseMatrix system_matrix;
LA::MPI::BlockSparseMatrix preconditioner_matrix;
LA::MPI::BlockVector solution;
LA::MPI::BlockVector locally_relevant_solution;
LA::MPI::BlockVector system_rhs;
// Let us move to the particles side of this program. There are two
// Particles::ParticleHandler objects used to couple the solid with the
// fluid, and to describe the passive tracers. These, in many ways, play a
// role similar to the DoFHandler class used in the discretization, i.e.,
// they provide for an enumeration of particles and allow querying
// information about each particle.
Particles::ParticleHandler<spacedim> tracer_particle_handler;
Particles::ParticleHandler<spacedim> solid_particle_handler;
// For every tracer particle, we need to compute the velocity field in its
// current position, and update its position using a discrete time stepping
// scheme. We do this using distributed linear algebra objects that store
// the coordinates of each particle's location or velocity. That is, these
// vectors have `tracer_particle_handler.n_global_particles() * spacedim`
// entries that we will store in a way so that parts of the vector are
// partitioned across all processes. (Implicitly, we here make the
// assumption that the `spacedim` coordinates of each particle are stored in
// consecutive entries of the vector.) Thus, we need to determine who the
// owner of each vector entry is. We set this owner to be equal to the
// process that generated that particle at time $t=0$. This information is
// stored for every process in the
// `locally_owned_tracer_particle_coordinates` IndexSet.
//
// Once the particles have been distributed around to match the process that
// owns the region where the particle lives, we will need read access from
// that process to the corresponding velocity field. We achieve this by
// filling a read only velocity vector field that contains the relevant
// information in ghost entries. This is achieved using the
// `locally_relevant_tracer_particle_coordinates` IndexSet, that keeps track
// of how things change during the simulation, i.e., it keeps track of where
// particles that the current process owns have ended up being, and who owns
// the particles that ended up in my subdomain.
//
// While this is not the most efficient strategy, we keep it this way to
// illustrate how things would work in a real fluid-structure
// interaction (FSI) problem. If a particle is linked to a specific solid
// degree of freedom, we are not free to choose who owns it, and we have to
// communicate this information around. We illustrate this here, and show
// that the communication pattern is point-to-point, and negligible in terms
// of total cost of the algorithm.
//
// The vectors defined based on these subdivisions are then used to store
// the particles velocities (read-only, with ghost entries) and their
// displacement (read/write, no ghost entries).
IndexSet locally_owned_tracer_particle_coordinates;
IndexSet locally_relevant_tracer_particle_coordinates;
LA::MPI::Vector tracer_particle_velocities;
LA::MPI::Vector relevant_tracer_particle_displacements;
// One of the key points of this tutorial program is the coupling between
// two independent parallel::distributed::Triangulation objects, one of
// which may be moving and deforming (with possibly large deformations) with
// respect to the other. When both the fluid and the solid triangulations
// are of type parallel::distributed::Triangulation, every process has
// access only to its fraction of locally owned cells of each of the two
// triangulations. As mentioned above, in general, the locally owned domains
// are not overlapping.
//
// In order to allow for the efficient exchange of information between
// non-overlapping parallel::distributed::Triangulation objects, some
// algorithms of the library require the user to provide a rough description
// of the area occupied by the locally owned part of the triangulation, in
// the form of a collection of axis-aligned bounding boxes for each process,
// that provide a full covering of the locally owned part of the domain.
// This kind of information can then be used in situations where one needs
// to send information to the owner of the cell surrounding a known
// location, without knowing who that owner may in fact be. But, if one
// knows a collection of bounding boxes for the geometric area or volume
// each process owns, then we can determine a subset of all processes that
// might possibly own the cell in which that location lies: namely, all of
// those processes whose bounding boxes contain that point. Instead of
// sending the information associated to that location to all processes, one
// can then get away with only sending it to a small subset of the processes
// with point-to-point communication primitives. (You will notice that this
// also allows for the typical time-vs-memory trade-off: The more data we
// are willing to store about each process's owned area -- in the form of
// more refined bounding box information -- the less communication we have
// to perform.)
//
// We construct this information by gathering a vector (of length
// Utilities::MPI::n_mpi_processes()) of vectors of BoundingBox objects.
// We fill this vector using the extract_rtree_level() function, and allow
// the user to select what level of the tree to extract. The "level"
// corresponds to how coarse/fine the overlap of the area with bounding
// boxes should be.
//
// As an example, this is what would be extracted by the
// extract_rtree_level() function applied to a two dimensional hyper ball,
// distributed over three processes. Each image shows in green the bounding
// boxes associated to the locally owned cells of the triangulation on each
// process, and in violet the bounding boxes extracted from the rtree:
//
// @image html rtree-process-0.png
// @image html rtree-process-1.png
// @image html rtree-process-2.png
//
// We store these boxes in a global member variable, which is updated at
// every refinement step:
std::vector<std::vector<BoundingBox<spacedim>>> global_fluid_bounding_boxes;
};
// @sect3{The StokesImmersedProblem class implementation}
// @sect4{Object construction and mesh initialization functions}
// In the constructor, we create the mpi_communicator as well as
// the triangulations and dof_handler for both the fluid and the solid.
// Using the mpi_communicator, both the ConditionalOStream and TimerOutput
// object are constructed.
template <int dim, int spacedim>
StokesImmersedProblem<dim, spacedim>::StokesImmersedProblem(
  const StokesImmersedProblemParameters<dim, spacedim> &par)
  // Keep a reference to the run-time parameters; they must outlive this
  // object (the member initializer order follows the declaration order).
  : par(par)
  , mpi_communicator(MPI_COMM_WORLD)
  // Only rank 0 actually writes to std::cout; all other processes get a
  // no-op stream.
  , pcout(std::cout,
          (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
  // Collect wall-clock timings across all processes and print a summary
  // table when the timer object is destroyed.
  , computing_timer(mpi_communicator,
                    pcout,
                    TimerOutput::summary,
                    TimerOutput::wall_times)
  // Both triangulations smooth the mesh on refinement and coarsening; the
  // fluid lives in the full `spacedim`-dimensional space, while the solid
  // may be a lower-dimensional (`dim`) object embedded in it.
  , fluid_tria(mpi_communicator,
               typename Triangulation<spacedim>::MeshSmoothing(
                 Triangulation<spacedim>::smoothing_on_refinement |
                 Triangulation<spacedim>::smoothing_on_coarsening))
  , solid_tria(mpi_communicator,
               typename Triangulation<dim, spacedim>::MeshSmoothing(
                 Triangulation<dim, spacedim>::smoothing_on_refinement |
                 Triangulation<dim, spacedim>::smoothing_on_coarsening))
  // Each DoFHandler is permanently attached to its triangulation; finite
  // elements are distributed later, in `initial_setup()`.
  , fluid_dh(fluid_tria)
  , solid_dh(solid_tria)
{}
// In order to generate the grid, we first try to use the functions in the
// deal.II GridGenerator namespace, by leveraging the
// GridGenerator::generate_from_name_and_argument(). If this function fails,
// then we use the following method, where the name is interpreted as a
// filename, and the arguments are interpreted as a map from manifold ids to
// CAD files, and are converted to Manifold descriptors using the OpenCASCADE
// namespace facilities. At the top, we read the file into a triangulation:
// Read `grid_file_name` into `tria`, then attach manifold descriptions
// built from CAD files. `ids_and_cad_file_names` is a string encoding a
// map from manifold ids to CAD file names (IGES or STEP). Throws (via
// AssertThrow) if a file extension is not recognized, or if deal.II was
// built without OpenCASCADE support.
template <int dim, int spacedim>
void read_grid_and_cad_files(const std::string &grid_file_name,
                             const std::string &ids_and_cad_file_names,
                             Triangulation<dim, spacedim> &tria)
{
  GridIn<dim, spacedim> grid_in;
  grid_in.attach_triangulation(tria);
  grid_in.read(grid_file_name);
  // If we got to this point, then the Triangulation has been read, and we are
  // ready to attach to it the correct manifold descriptions. We perform the
  // next lines of code only if deal.II has been built with OpenCASCADE
  // support. For each entry in the map, we try to open the corresponding CAD
  // file, we analyze it, and according to its content, opt for either a
  // OpenCASCADE::ArclengthProjectionLineManifold (if the CAD file contains a
  // single `TopoDS_Edge` or a single `TopoDS_Wire`) or a
  // OpenCASCADE::NURBSPatchManifold, if the file contains a single face.
  // Notice that if the CAD files do not contain single wires, edges, or
  // faces, an assertion will be thrown in the generation of the Manifold.
  //
  // We use the Patterns::Tools::Convert class to do the conversion from the
  // string to a map between manifold ids and file names for us:
#ifdef DEAL_II_WITH_OPENCASCADE
  using map_type = std::map<types::manifold_id, std::string>;
  using Converter = Patterns::Tools::Convert<map_type>;
  for (const auto &pair : Converter::to_value(ids_and_cad_file_names))
    {
      const auto &manifold_id = pair.first;
      const auto &cad_file_name = pair.second;
      // Dispatch on the (lower-cased) file extension to pick the right
      // OpenCASCADE reader.
      const auto extension = boost::algorithm::to_lower_copy(
        cad_file_name.substr(cad_file_name.find_last_of('.') + 1));
      TopoDS_Shape shape;
      if (extension == "iges" || extension == "igs")
        shape = OpenCASCADE::read_IGES(cad_file_name);
      else if (extension == "step" || extension == "stp")
        shape = OpenCASCADE::read_STEP(cad_file_name);
      else
        AssertThrow(false,
                    ExcNotImplemented("We found an extension that we "
                                      "do not recognize as a CAD file "
                                      "extension. Bailing out."));
      // Now we check how many faces are contained in the `Shape`. OpenCASCADE
      // is intrinsically 3D, so if this number is zero, we interpret this as
      // a line manifold, otherwise as an
      // OpenCASCADE::NormalToMeshProjectionManifold in `spacedim` = 3, or
      // OpenCASCADE::NURBSPatchManifold in `spacedim` = 2.
      const auto n_elements = OpenCASCADE::count_elements(shape);
      if ((std::get<0>(n_elements) == 0))
        tria.set_manifold(
          manifold_id,
          OpenCASCADE::ArclengthProjectionLineManifold<dim, spacedim>(shape));
      else if (spacedim == 3)
        {
          // We use this trick, because
          // OpenCASCADE::NormalToMeshProjectionManifold is only implemented
          // for spacedim = 3. The check above makes sure that things actually
          // work correctly. (The cast is a no-op at runtime: in this branch
          // Triangulation<dim, spacedim> and Triangulation<dim, 3> are the
          // same type; the cast only convinces the compiler of that.)
          const auto t = reinterpret_cast<Triangulation<dim, 3> *>(&tria);
          t->set_manifold(manifold_id,
                          OpenCASCADE::NormalToMeshProjectionManifold<dim, 3>(
                            shape));
        }
      else
        // We also allow surface descriptions in two dimensional spaces based
        // on single NURBS patches. For this to work, the CAD file must
        // contain a single `TopoDS_Face`.
        tria.set_manifold(manifold_id,
                          OpenCASCADE::NURBSPatchManifold<dim, spacedim>(
                            TopoDS::Face(shape)));
    }
#else
  (void)ids_and_cad_file_names;
  AssertThrow(false, ExcNotImplemented("Generation of the grid failed."));
#endif
}
// Now let's put things together, and make all the necessary grids. As
// mentioned above, we first try to generate the grid internally, and if we
// fail (i.e., if we end up in the `catch` clause), then we proceed with the
// above function.
//
// We repeat this pattern for both the fluid and the solid mesh.
// Build the fluid and solid grids. For each grid we first try the deal.II
// GridGenerator facilities; if that fails, the grid name is treated as a
// mesh file name and the arguments as CAD manifold descriptions, handled
// by read_grid_and_cad_files() above. Finally each grid is refined
// globally by the amount requested in the parameter file.
template <int dim, int spacedim>
void StokesImmersedProblem<dim, spacedim>::make_grid()
{
  // Helper encapsulating the "generate, or fall back to reading files"
  // pattern shared by the fluid and solid grids. Only the fluid grid
  // announces the fallback on screen, matching the original behavior.
  const auto build_grid = [&](auto              &tria,
                              const std::string &name,
                              const std::string &arguments,
                              const bool         announce_fallback) {
    try
      {
        GridGenerator::generate_from_name_and_arguments(tria, name, arguments);
      }
    catch (...)
      {
        if (announce_fallback)
          pcout << "Generating from name and argument failed." << std::endl
                << "Trying to read from file name." << std::endl;
        read_grid_and_cad_files(name, arguments, tria);
      }
  };

  build_grid(fluid_tria,
             par.name_of_fluid_grid,
             par.arguments_for_fluid_grid,
             /*announce_fallback=*/true);
  fluid_tria.refine_global(par.initial_fluid_refinement);

  build_grid(solid_tria,
             par.name_of_solid_grid,
             par.arguments_for_solid_grid,
             /*announce_fallback=*/false);
  solid_tria.refine_global(par.initial_solid_refinement);
}
// @sect4{Particle initialization functions}
// Once the solid and fluid grids have been created, we start filling the
// Particles::ParticleHandler objects. The first one we take care of is the
// one we use to keep track of passive tracers in the fluid. These are
// simply transported along, and in some sense their locations are
// unimportant: We just want to use them to see where flow is being
// transported. We could use any way we choose to determine where they are
// initially located. A convenient one is to create the initial locations as
// the vertices of a mesh in a shape of our choice -- a choice determined by
// one of the run-time parameters in the parameter file.
//
// In this implementation, we create tracers using the support points of a
// FE_Q finite element space defined on a temporary grid, which is then
// discarded. Of this grid, we only keep around the Particles::Particle
// objects (stored in a Particles::ParticleHandler class) associated to the
// support points.
//
// The Particles::ParticleHandler class offers the possibility to insert a set
// of particles that live physically in the part of the domain owned by the
// active process. However, in this case this function would not suffice. The
// particles generated as the locally owned support points of an FE_Q object
// on an arbitrary grid (non-matching with regard to the fluid grid) have no
// reason to lie in the same physical region of the locally owned subdomain
// of the fluid grid. In fact this will almost never be the case, especially
// since we want to keep track of what is happening to the particles
// themselves.
//
// In particle-in-cell methods (PIC), it is often customary to assign
// ownership of the particles to the process where the particles lie. In this
// tutorial we illustrate a different approach, which is useful if one wants
// to keep track of information related to the particles (for example, if a
// particle is associated to a given degree of freedom, which is owned by a
// specific process and not necessarily the same process that owns the fluid
// cell where the particle happens to be at any given time).
// In the approach used here, ownership of the particles is assigned once at
// the beginning, and one-to-one communication happens whenever the original
// owner needs information from the process that owns the cell where the
// particle lives. We make sure that we set ownership of the particles using
// the initial particle distribution, and keep the same ownership throughout
// the execution of the program.
//
// With this overview out of the way, let us see what the function does. At
// the top, we create a temporary triangulation and DoFHandler object from
// which we will take the node locations for initial particle locations:
template <int dim, int spacedim>
void StokesImmersedProblem<dim, spacedim>::setup_tracer_particles()
{
parallel::distributed::Triangulation<spacedim> particle_insert_tria(
mpi_communicator);
GridGenerator::generate_from_name_and_arguments(
particle_insert_tria,
par.name_of_particle_grid,
par.arguments_for_particle_grid);
particle_insert_tria.refine_global(par.particle_insertion_refinement);
FE_Q<spacedim> particles_fe(1);
DoFHandler<spacedim> particles_dof_handler(particle_insert_tria);
particles_dof_handler.distribute_dofs(particles_fe);
// This is where things start to get complicated. Since we may run
// this program in a parallel environment, every parallel process will now
// have created these temporary triangulations and DoFHandlers. But, in
// fully distributed triangulations, the active process only knows about the
// locally owned cells, and has no idea of how other processes have
// distributed their own cells. This is true for both the temporary
// triangulation created above as well as the fluid triangulation into which
// we want to embed the particles below. On the other hand, these locally
// known portions of the two triangulations will, in general, not overlap.
// That is, the locations of the particles we will create from the node