# Listing of Parameters # --------------------- # A list of names of additional shared libraries that should be loaded upon # starting up the program. The names of these files can contain absolute or # relative paths (relative to the directory in which you call ASPECT). In # fact, file names that do not contain any directory information (i.e., only # the name of a file such as <myplugin.so>) will not be found if they are not # located in one of the directories listed in the \texttt{LD_LIBRARY_PATH} # environment variable. In order to load a library in the current directory, # use <./myplugin.so> instead. # # The typical use of this parameter is so that you can implement additional # plugins in your own directories, rather than in the ASPECT source # directories. You can then simply compile these plugins into a shared library # without having to re-compile all of ASPECT. See the section of the manual # discussing writing extensions for more information on how to compile # additional files into a shared library. set Additional shared libraries = # In order to make the problem in the first time step easier to solve, we need # a reasonable guess for the temperature and pressure. To obtain it, we use an # adiabatic pressure and temperature field. This parameter describes what the # `adiabatic' temperature would be at the surface of the domain (i.e. at depth # zero). Note that this value need not coincide with the boundary condition # posed at this point. Rather, the boundary condition may differ significantly # from the adiabatic value, and then typically induce a thermal boundary # layer. # # For more information, see the section in the manual that discusses the # general mathematical model. set Adiabatic surface temperature = 1000.0 # default: 0. # In computations, the time step $k$ is chosen according to $k = c \min_K # \frac {h_K} {\|u\|_{\infty,K} p_T}$ where $h_K$ is the diameter of cell $K$, # and the denominator is the maximal magnitude of the velocity on cell $K$ # times the polynomial degree $p_T$ of the temperature discretization. The # dimensionless constant $c$ is called the CFL number in this program. For # time discretizations that have explicit components, $c$ must be less than a # constant that depends on the details of the time discretization and that is # no larger than one. On the other hand, for implicit discretizations such as # the one chosen here, one can choose the time step as large as one wants (in # particular, one can choose $c>1$) though a CFL number significantly larger # than one will yield rather diffusive solutions. Units: None. set CFL number = 1.0 # The number of space dimensions you want to run this program in. ASPECT can # run in 2 and 3 space dimensions. set Dimension = 2 # The end time of the simulation. The default value is a number so that when # converted from years to seconds it is approximately equal to the largest # number representable in floating point arithmetic. For all practical # purposes, this equals infinity. Units: Years if the 'Use years in output # instead of seconds' parameter is set; seconds otherwise. set End time = 1.0e10 # default: 5.69e+300 # The maximal number of nonlinear iterations to be performed. set Max nonlinear iterations = 10 # The maximal number of nonlinear iterations to be performed in the # pre-refinement steps. This does not include the last refinement step before # moving to timestep 1. When this parameter has a larger value than max # nonlinear iterations, the latter is used.
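# For example (an illustrative combination, not a recommendation): with `Max
# nonlinear iterations = 10' and `Max nonlinear iterations in pre-refinement
# = 2', at most 2 nonlinear iterations are performed in each pre-refinement
# step, while up to 10 are performed once the actual time stepping begins. If
# this parameter were instead set larger than `Max nonlinear iterations', the
# latter would be used, as described above.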
set Max nonlinear iterations in pre-refinement = 2147483647 # Set a maximum time step size for only the first timestep. Generally the time # step based on the CFL number should be sufficient, but for complicated # models or benchmarking it may be useful to limit the first time step to some # value, especially when using the free surface, which needs to settle to # prevent instabilities. This should in that case be combined with a value set # for ``Maximum relative increase in time step''. The default value is a value # so that when converted from years into seconds it equals the largest number # representable by a floating point number, implying an unlimited time step. # Units: Years or seconds, depending on the ``Use years in output instead of # seconds'' parameter. set Maximum first time step = 5.69e+300 # Set a percentage with which the time step is limited to increase. # Generally the time step based on the CFL number should be sufficient, but # for complicated models which may suddenly drastically change behavior, it # may be useful to limit the increase in the time step, without limiting the # time step size of the whole simulation to a particular number. For example, # if this parameter is set to $50$, then that means that the time step can at # most increase by 50\% from one time step to the next, or by a factor of 1.5. # Units: \% set Maximum relative increase in time step = 2147483647 # Set a maximum time step size for the solver to use. Generally the time step # based on the CFL number should be sufficient, but for complicated models or # benchmarking it may be useful to limit the time step to some value. The # default value is a value so that when converted from years into seconds it # equals the largest number representable by a floating point number, implying # an unlimited time step. Units: Years or seconds, depending on the ``Use years # in output instead of seconds'' parameter. set Maximum time step = 5.69e+300 # The kind of scheme used to resolve the nonlinearity in the system. `single # Advection, single Stokes' means that no nonlinear iterations are done, and # the temperature, compositional fields and Stokes equations are solved # exactly once per time step, one after the other. The `iterated Advection and # Stokes' scheme iterates this decoupled approach by alternating the solution # of the temperature, composition and Stokes systems. The `single Advection, # iterated Stokes' scheme solves the temperature and composition equation once # at the beginning of each time step and then iterates out the solution of the # Stokes equation. The `no Advection, iterated Stokes' scheme only solves the # Stokes system, iterating out the solution, and ignores compositions and the # temperature equation (careful, the material model must not depend on the # temperature or composition; this is mostly useful for Stokes benchmarks). # The `no Advection, single Stokes' scheme only solves the Stokes system once # per timestep. This is also mostly useful for Stokes benchmarks. The `single # Advection, no Stokes' scheme only solves the temperature and other advection # systems once, and instead of solving for the Stokes system, a prescribed # velocity and pressure is used. The `iterated Advection and Newton Stokes' # scheme iterates by alternating the solution of the temperature, composition # and Stokes equations, using Picard iterations for the temperature and # composition, and Newton iterations for the Stokes system.
The `single # Advection, iterated Newton Stokes' scheme solves the temperature and # composition equations once at the beginning of each time step and then # iterates out the solution of the Stokes equation, using Newton iterations # for the Stokes system. The `first timestep only, single Stokes' scheme # solves the Stokes equations exactly once, at the first time step. No # nonlinear iterations are done, and the temperature and composition systems # are not solved. # # The `IMPES' scheme is deprecated and only allowed for reasons of backwards # compatibility. It is the same as `single Advection, single Stokes'. The # `iterated IMPES' scheme is deprecated and only allowed for reasons of # backwards compatibility. It is the same as `iterated Advection and Stokes'. # The `iterated Stokes' scheme is deprecated and only allowed for reasons of # backwards compatibility. It is the same as `single Advection, iterated # Stokes'. The `Stokes only' scheme is deprecated and only allowed for reasons # of backwards compatibility. It is the same as `no Advection, iterated # Stokes'. The `Advection only' scheme is deprecated and only allowed for # reasons of backwards compatibility. It is the same as `single Advection, no # Stokes'. The `Newton Stokes' scheme is deprecated and only allowed for # reasons of backwards compatibility. It is the same as `iterated Advection # and Newton Stokes'. set Nonlinear solver scheme = single Advection, single Stokes # A relative tolerance up to which the nonlinear solver will iterate. This # parameter is only relevant if the `Nonlinear solver scheme' does nonlinear # iterations, in other words, if it is set to something other than `single # Advection, single Stokes' or `single Advection, no Stokes'. set Nonlinear solver tolerance = 1e-5 # The name of the directory into which all output files should be placed. This # may be an absolute or a relative path. set Output directory = /work/newriver/egrant93/Hackathon_2019/frank-kamenetskii/SUPG_tests/highRa_case/from_Shangxin/output_dimensional # default: output # If and how to normalize the pressure after the solution step. This is # necessary because depending on boundary conditions, in many cases the # pressure is only determined by the model up to a constant. On the other # hand, we often would like to have a well-determined pressure, for example # for table lookups of material properties in models or for comparing # solutions. If the given value is `surface', then normalization at the end of # each time step adds a constant value to the pressure in such a way that the # average pressure at the surface of the domain is what is set in the `Surface # pressure' parameter; the surface of the domain is determined by asking the # geometry model whether a particular face of the geometry has a zero or small # `depth'. If the value of this parameter is `volume' then the pressure is # normalized so that the domain average is zero. If `no' is given, then no # pressure normalization is performed. set Pressure normalization = surface # A flag indicating whether the computation should be resumed from a # previously saved state (if true) or start from scratch (if false). If auto # is selected, models will be resumed if there is an existing checkpoint file, # otherwise started from scratch. set Resume computation = false # The start time of the simulation. Units: Years if the 'Use years in output # instead of seconds' parameter is set; seconds otherwise. set Start time = 0 # default: 0.
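# As an illustration of how the nonlinear solver parameters above interact
# (the values are examples only, not a recommendation), a model that iterates
# the decoupled temperature/composition/Stokes solves could use
#
#   set Nonlinear solver scheme    = iterated Advection and Stokes
#   set Nonlinear solver tolerance = 1e-5
#   set Max nonlinear iterations   = 10
#
# where the nonlinear iteration stops once the relative tolerance is reached,
# or at the latest after the maximal number of nonlinear iterations.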
# The value the pressure is normalized to in each time step when `Pressure # normalization' is set to `surface' with default value 0. This setting is # ignored in all other cases. # # The mathematical equations that describe thermal convection only determine # the pressure up to an arbitrary constant. On the other hand, for comparison # and for looking up material parameters it is important that the pressure be # normalized somehow. We do this by enforcing a particular average pressure # value at the surface of the domain, where the geometry model determines # where the surface is. This parameter describes what this average surface # pressure value is supposed to be. By default, it is set to zero, but one may # want to choose a different value for example for simulating only the volume # of the mantle below the lithosphere, in which case the surface pressure # should be the lithostatic pressure at the bottom of the lithosphere. # # For more information, see the section in the manual that discusses the # general mathematical model. set Surface pressure = 0.0 # default: 0. # How frequently in timesteps to output timing information. This is generally # adjusted only for debugging and timing purposes. If the value is set to zero # it will also output timing information at the initiation timesteps. set Timing output frequency = 10 # default: 100 # Mantle convection simulations are often focused on convection dominated # systems. However, these codes can also be used to investigate systems where # heat conduction plays a dominant role. This parameter indicates whether the # simulator should also use heat conduction in determining the length of each # time step. set Use conduction timestep = false # If set to true, the advection and reactions of compositional fields and # temperature are solved separately, and can use different time steps. Note # that this will only work if the material/heating model fills the # reaction\_rates/heating\_reaction\_rates structures. Operator splitting can # be used with any existing solver schemes that solve the # temperature/composition equations. set Use operator splitting = false # When computing results for mantle convection simulations, it is often # difficult to judge the order of magnitude of results when they are stated in # MKS units involving seconds. Rather, some kinds of results such as # velocities are often stated in terms of meters per year (or, sometimes, # centimeters per year). On the other hand, for non-dimensional computations, # one wants results in their natural unit system as used inside the code. If # this flag is set to `true' conversion to years happens; if it is `false', no # such conversion happens. Note that when `true', some input such as # prescribed velocities should also use years instead of seconds. set Use years in output instead of seconds = true # Name of the world builder file. If empty, the world builder is not # initialized. set World builder file = subsection Adiabatic conditions model # Select one of the following models: # # `ascii data': A model in which the adiabatic profile is read from a file # that describes the reference state. Note the required format of the input # data: The first lines may contain any number of comments if they begin # with `#', but one of these lines needs to contain the number of points in # the reference state as for example `# POINTS: 3'. Following the comment # lines there has to be a single line containing the names of all data # columns, separated by arbitrarily many spaces. 
Column names are not # allowed to contain spaces. The file can contain unnecessary columns, but # for this plugin it needs to at least provide columns named `temperature', # `pressure', and `density'. Note that the data lines in the file need to be # sorted in order of increasing depth from 0 to the maximal depth in the # model domain. Points in the model that are outside of the provided depth # range will be assigned the maximum or minimum depth values, respectively. # Points do not need to be equidistant, but the computation of properties is # optimized in speed if they are. # # `compute profile': A model in which the adiabatic profile is calculated by # solving the hydrostatic equations for pressure and temperature in depth. # The gravity is assumed to be in depth direction and the composition is # either given by the initial composition at reference points or computed as # a reference depth-function. All material parameters are computed by the # material model plugin. The surface conditions are either constant or # changing over time as prescribed by a user-provided function. # # `function': A model in which the adiabatic profile is specified by a user # defined function. The supplied function has to contain temperature, # pressure, and density as a function of depth in this order. set Model name = function # default: compute profile subsection Ascii data model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/tests/adiabatic-conditions/ascii-data/test/ # The file name of the model data. Provide file in format: (Velocity file # name).\%s\%d where \%s is a string specifying the boundary of the model # according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = # Scalar factor, which is applied to the model data. You might want to use # this to scale the input to a reference model. Another way to use this # factor is to convert units of the input files. For instance, if you # provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end subsection Compute profile # Select how the reference profile for composition is computed. This # profile is used to evaluate the material model, when computing the # pressure and temperature profile. set Composition reference profile = initial composition # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. 
This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The number of points we use to compute the adiabatic profile. The higher # the number of points, the more accurate the downward integration from # the adiabatic surface temperature will be. set Number of points = 1000 # Whether to use the 'Surface condition function' to determine surface # conditions, or the 'Adiabatic surface temperature' and 'Surface # pressure' parameters. If this is set to true the reference profile is # updated every timestep. The function expression of the function should # be independent of space, but can depend on time 't'. The function must # return two components, the first one being reference surface pressure, # the second one being reference surface temperature. set Use surface condition function = false # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,t subsection Surface condition function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric # value everywhere the constant appears. These values can be defined # using this parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or # multiplication, as well as all of the common functions such as `sin' # or `cos'. In addition, it may contain expressions like `if(x>0, 1, # -1)' where the expression evaluates to the second argument if the # first argument is true, and to the third argument otherwise. For a # full overview of possible expressions accepted see the documentation # of the muparser library at http://muparser.beltoforion.de/. 
# # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0; 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' # (in 3d) for spatial coordinates and `t' for time. You can then use # these variable names in your function expression and they will be # replaced by the values of these variables at which the function is # currently evaluated. However, you can also choose a different set of # names for the independent variables at which to evaluate your function # expression. For example, if you work in spherical coordinates, you may # wish to set this input parameter to `r,phi,theta,t' and then use these # variable names in your function expression. set Variable names = x,t end end subsection Function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # Expression for the adiabatic temperature, pressure, and density # separated by semicolons as a function of `depth'. set Function expression = 500.0;9.81*depth;100.0 # default: 0.0; 0.0; 1.0 set Variable names = depth end end subsection Boundary composition model # When the composition is fixed on a given boundary as determined by the # list of 'Fixed composition boundary indicators', there might be parts of # the boundary where material flows out and one may want to prescribe the # composition only on those parts of the boundary where there is inflow. # This parameter determines if compositions are only prescribed at these # inflow parts of the boundary (if false) or everywhere on a given boundary, # independent of the flow direction (if true). By default, this parameter is # set to false, except in models with melt transport (see below). Note that # in this context, `fixed' refers to the fact that these are the boundary # indicators where Dirichlet boundary conditions are applied, and does not # imply that the boundary composition is time-independent. # # Mathematically speaking, the compositional fields satisfy an advection # equation that has no diffusion. For this equation, one can only impose # Dirichlet boundary conditions (i.e., prescribe a fixed compositional field # value at the boundary) at those boundaries where material flows in. This # would correspond to the ``false'' setting of this parameter, which is # correspondingly the default. On the other hand, on a finite dimensional # discretization such as the one one obtains from the finite element method, # it is possible to also prescribe values on outflow boundaries, even though # this may make no physical sense. This would then correspond to the # ``true'' setting of this parameter. 
# # A warning for models with melt transport: In models with fluid flow, some # compositional fields (in particular the porosity) might be transported # with the fluid velocity, and would need to set the constraints based on # the fluid velocity. However, this is currently not possible, because we # reuse the same matrix for all compositional fields, and therefore can not # use different constraints for different fields. Consequently, we set this # parameter to true by default in models where melt transport is enabled. Be # aware that if you change this default setting, you will not use the melt # velocity, but the solid velocity to determine on which parts of the # boundaries there is outflow. set Allow fixed composition on outflow boundaries = false for models without melt # A comma separated list of names denoting those boundaries on which the # composition is fixed and described by the boundary composition object # selected in its own section of this input file. All boundary indicators # used by the geometry but not explicitly listed here will end up with # no-flux (insulating) boundary conditions. # # The names of the boundaries listed here can either be numbers (in which # case they correspond to the numerical boundary indicators assigned by the # geometry object), or they can correspond to any of the symbolic names the # geometry object may have provided for each part of the boundary. You may # want to compare this with the documentation of the geometry model you use # in your model. # # This parameter only describes which boundaries have a fixed composition, # but not what composition should hold on these boundaries. The latter piece # of information needs to be implemented in a plugin in the # BoundaryComposition group, unless an existing implementation in this group # already provides what you want. set Fixed composition boundary indicators = # A comma-separated list of boundary composition models that will be used to # initialize the composition. These plugins are loaded in the order given, # and modify the existing composition field via the operators listed in # 'List of model operators'. # # The following boundary composition models are available: # # `ascii data': Implementation of a model in which the boundary composition # is derived from files containing data in ascii format. Note the required # format of the input data: The first lines may contain any number of # comments if they begin with `#', but one of these lines needs to contain # the number of grid points in each dimension as for example `# POINTS: 3 # 3'. The order of the data columns has to be `x', `composition1', # `composition2', etc. in a 2d model and `x', `y', `composition1', # `composition2', etc., in a 3d model, according to the number of # compositional fields, which means that there has to be a single column for # every composition in the model. Note that the data in the input files need # to be sorted in a specific order: the first coordinate needs to ascend # first, followed by the second in order to assign the correct data to the # prescribed coordinates.If you use a spherical model, then the assumed grid # changes. `x' will be replaced by the radial distance of the point to the # bottom of the model, `y' by the azimuth angle and `z' by the polar angle # measured positive from the north pole. The grid will be assumed to be a # latitude-longitude grid. 
Note that the order of spherical coordinates is # `r', `phi', `theta' and not `r', `theta', `phi', since this allows for # dimension independent expressions. # # `box': A model in which the composition is chosen constant on the sides of # a box which are selected by the parameters # Left/Right/Top/Bottom/Front/Back composition # # `box with lithosphere boundary indicators': A model in which the # composition is chosen constant on all the sides of a box. Additional # boundary indicators are added to the lithospheric parts of the vertical # boundaries. This model is to be used with the 'Two Merged Boxes' Geometry # Model. # # `function': Implementation of a model in which the boundary composition is # given in terms of an explicit formula that is elaborated in the parameters # in section ``Boundary composition model|Function''. # # Since the symbol $t$ indicating time may appear in the formulas for the # prescribed composition, it is interpreted as having units seconds unless # the global input parameter ``Use years in output instead of seconds'' is # set, in which case we interpret the formula expressions as having units # year. # # The format of these functions follows the syntax understood by the # muparser library, see Section~\ref{sec:muparser-format}. # # `initial composition': A model in which the composition at the boundary is # chosen to be the same as given in the initial conditions. # # Because this class simply takes what the initial composition had # described, this class can not know certain pieces of information such as # the minimal and maximal composition on the boundary. For operations that # require this, for example in post-processing, this boundary composition # model must therefore be told what the minimal and maximal values on the # boundary are. This is done using parameters set in section ``Boundary # composition model/Initial composition''. # # `spherical constant': A model in which the composition is chosen constant # on the inner and outer boundaries of a surface, spherical shell, chunk or # ellipsoidal chunk. Parameters are read from subsection 'Spherical # constant'. set List of model names = # A comma-separated list of operators that will be used to append the listed # composition models onto the previous models. If only one operator is # given, the same operator is applied to all models. set List of model operators = add # Select one of the following models: # # `ascii data': Implementation of a model in which the boundary composition # is derived from files containing data in ascii format. Note the required # format of the input data: The first lines may contain any number of # comments if they begin with `#', but one of these lines needs to contain # the number of grid points in each dimension as for example `# POINTS: 3 # 3'. The order of the data columns has to be `x', `composition1', # `composition2', etc. in a 2d model and `x', `y', `composition1', # `composition2', etc., in a 3d model, according to the number of # compositional fields, which means that there has to be a single column for # every composition in the model. Note that the data in the input files need # to be sorted in a specific order: the first coordinate needs to ascend # first, followed by the second in order to assign the correct data to the # prescribed coordinates.If you use a spherical model, then the assumed grid # changes. 
`x' will be replaced by the radial distance of the point to the # bottom of the model, `y' by the azimuth angle and `z' by the polar angle # measured positive from the north pole. The grid will be assumed to be a # latitude-longitude grid. Note that the order of spherical coordinates is # `r', `phi', `theta' and not `r', `theta', `phi', since this allows for # dimension independent expressions. # # `box': A model in which the composition is chosen constant on the sides of # a box which are selected by the parameters # Left/Right/Top/Bottom/Front/Back composition # # `box with lithosphere boundary indicators': A model in which the # composition is chosen constant on all the sides of a box. Additional # boundary indicators are added to the lithospheric parts of the vertical # boundaries. This model is to be used with the 'Two Merged Boxes' Geometry # Model. # # `function': Implementation of a model in which the boundary composition is # given in terms of an explicit formula that is elaborated in the parameters # in section ``Boundary composition model|Function''. # # Since the symbol $t$ indicating time may appear in the formulas for the # prescribed composition, it is interpreted as having units seconds unless # the global input parameter ``Use years in output instead of seconds'' is # set, in which case we interpret the formula expressions as having units # year. # # The format of these functions follows the syntax understood by the # muparser library, see Section~\ref{sec:muparser-format}. # # `initial composition': A model in which the composition at the boundary is # chosen to be the same as given in the initial conditions. # # Because this class simply takes what the initial composition had # described, this class can not know certain pieces of information such as # the minimal and maximal composition on the boundary. For operations that # require this, for example in post-processing, this boundary composition # model must therefore be told what the minimal and maximal values on the # boundary are. This is done using parameters set in section ``Boundary # composition model/Initial composition''. # # `spherical constant': A model in which the composition is chosen constant # on the inner and outer boundaries of a surface, spherical shell, chunk or # ellipsoidal chunk. Parameters are read from subsection 'Spherical # constant'. # # \textbf{Warning}: This parameter provides an old and deprecated way of # specifying boundary composition models and shouldn't be used. Please use # 'List of model names' instead. set Model name = unspecified subsection Ascii data model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/boundary-composition/ascii-data/test/ # The file name of the model data. Provide file in format: (Velocity file # name).\%s\%d where \%s is a string specifying the boundary of the model # according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = box_2d_%s.%d.txt # Time step between following data files. 
Depending on the setting of the # global `Use years in output instead of seconds' flag in the input file, # this number is either interpreted as seconds or as years. The default is # one million, i.e., either one million seconds or one million years. set Data file time step = 1e6 # In some cases the boundary files are not numbered in increasing but in # decreasing order (e.g. `Ma BP'). If this flag is set to `True' the # plugin will first load the file with the number `First data file number' # and decrease the file number during the model run. set Decreasing file order = false # Time from which on the data file with number `First data file number' is # used as boundary condition. Until this time, a boundary condition equal # to zero everywhere is assumed. Depending on the setting of the global # `Use years in output instead of seconds' flag in the input file, this # number is either interpreted as seconds or as years. set First data file model time = 0 # Number of the first velocity file to be loaded when the model time is # larger than `First velocity file model time'. set First data file number = 0 # Scalar factor, which is applied to the model data. You might want to use # this to scale the input to a reference model. Another way to use this # factor is to convert units of the input files. For instance, if you # provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end subsection Box # A comma separated list of composition boundary values at the bottom # boundary (at minimal $y$-value in 2d, or minimal $z$-value in 3d). This # list must have as many entries as there are compositional fields. Units: # none. set Bottom composition = # A comma separated list of composition boundary values at the left # boundary (at minimal $x$-value). This list must have as many entries as # there are compositional fields. Units: none. set Left composition = # A comma separated list of composition boundary values at the right # boundary (at maximal $x$-value). This list must have as many entries as # there are compositional fields. Units: none. set Right composition = # A comma separated list of composition boundary values at the top # boundary (at maximal $y$-value in 2d, or maximal $z$-value in 3d). This # list must have as many entries as there are compositional fields. Units: # none. set Top composition = end subsection Box with lithosphere boundary indicators # A comma separated list of composition boundary values at the bottom # boundary (at minimal $y$-value in 2d, or minimal $z$-value in 3d). This # list must have as many entries as there are compositional fields. Units: # none. set Bottom composition = # A comma separated list of composition boundary values at the left # boundary (at minimal $x$-value). This list must have as many entries as # there are compositional fields. Units: none. set Left composition = # A comma separated list of composition boundary values at the left # boundary (at minimal $x$-value). This list must have as many entries as # there are compositional fields. Units: none. set Left composition lithosphere = # A comma separated list of composition boundary values at the right # boundary (at maximal $x$-value). This list must have as many entries as # there are compositional fields. Units: none. set Right composition = # A comma separated list of composition boundary values at the right # boundary (at maximal $x$-value). This list must have as many entries as # there are compositional fields. Units: none. 
set Right composition lithosphere = # A comma separated list of composition boundary values at the top # boundary (at maximal $y$-value in 2d, or maximal $z$-value in 3d). This # list must have as many entries as there are compositional fields. Units: # none. set Top composition = end subsection Function # A selection that determines the assumed coordinate system for the # function variables. Allowed values are 'cartesian', 'spherical', and # 'depth'. 'spherical' coordinates are interpreted as r,phi or r,phi,theta # in 2D/3D respectively with theta being the polar angle. 'depth' will # create a function, in which only the first parameter is non-zero, which # is interpreted to be the depth of the point. set Coordinate system = cartesian # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection Initial composition # Maximal composition. Units: none. set Maximal composition = 1. # Minimal composition. Units: none. set Minimal composition = 0. end subsection Spherical constant # Composition at the inner boundary (core mantle boundary). Units: none. set Inner composition = 1. # Composition at the outer boundary (lithosphere water/air). For a # spherical geometry model, this is the only boundary. Units: none. set Outer composition = 0. 
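#
# As an illustration (the values here are placeholders, not part of this
# model), the `spherical constant' boundary composition model documented
# above could be selected and configured with
#
#   set List of model names = spherical constant
#   subsection Spherical constant
#     set Inner composition = 1.
#     set Outer composition = 0.
#   end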
end end subsection Boundary fluid pressure model # Select one of the following plugins: # # `density': A plugin that prescribes the fluid pressure gradient at the # boundary based on fluid/solid density from the material model. set Plugin name = density subsection Density # The density formulation used to compute the fluid pressure gradient at # the model boundary. # # `solid density' prescribes the gradient of the fluid pressure as solid # density times gravity (which is the lithostatic pressure) and leads to # approximately the same pressure in the melt as in the solid, so that # fluid is only flowing in or out due to differences in dynamic pressure. # # `fluid density' prescribes the gradient of the fluid pressure as fluid # density times gravity and causes melt to flow in with the same velocity # as inflowing solid material, or no melt flowing in or out if the solid # velocity normal to the boundary is zero. # # 'average density' prescribes the gradient of the fluid pressure as the # averaged fluid and solid density times gravity (which is a better # approximation for the lithostatic pressure than just the solid density) # and leads to approximately the same pressure in the melt as in the # solid, so that fluid is only flowing in or out due to differences in # dynamic pressure. set Density formulation = solid density end end subsection Boundary heat flux model # A comma separated list of names denoting those boundaries on which the # heat flux is fixed and described by the boundary heat flux object selected # in the 'Model name' parameter. All boundary indicators used by the # geometry but not explicitly listed here or in the list of 'Fixed # temperature boundary indicators' in the 'Boundary temperature model' will # end up with no-flux (insulating) boundary conditions. # # The names of the boundaries listed here can either be numbers (in which # case they correspond to the numerical boundary indicators assigned by the # geometry object), or they can correspond to any of the symbolic names the # geometry object may have provided for each part of the boundary. You may # want to compare this with the documentation of the geometry model you use # in your model. # # This parameter only describes which boundaries have a fixed heat flux, but # not what heat flux should hold on these boundaries. The latter piece of # information needs to be implemented in a plugin in the BoundaryHeatFlux # group, unless an existing implementation in this group already provides # what you want. set Fixed heat flux boundary indicators = # Select one of the following plugins: # # `function': Implementation of a model in which the boundary heat flux is # given in terms of an explicit formula that is elaborated in the parameters # in section ``Boundary heat flux model|Function''. The format of these # functions follows the syntax understood by the muparser library, see # Section~\ref{sec:muparser-format}. # # The formula you describe in the mentioned section is a scalar value for # the heat flux that is assumed to be the flux normal to the boundary, and # that has the unit W/(m$^2$) (in 3d) or W/m (in 2d). Negative fluxes are # interpreted as the flow of heat into the domain, and positive fluxes are # interpreted as heat flowing out of the domain. # # The symbol $t$ indicating time that may appear in the formulas for the # prescribed heat flux is interpreted as having units seconds unless the # global parameter ``Use years in output instead of seconds'' has been set. 
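# For instance (the value is illustrative only), a uniform heat flux of 0.05
# W/m$^2$ out of the boundaries listed in `Fixed heat flux boundary
# indicators' could be prescribed with the `function' plugin selected below
# and
#
#   subsection Function
#     set Function expression = 0.05
#   end
#
# where the positive sign means heat flowing out of the domain, following the
# convention described above.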
set Model name = function subsection Function # A selection that determines the assumed coordinate system for the # function variables. Allowed values are `cartesian', `spherical', and # `depth'. `spherical' coordinates are interpreted as r,phi or r,phi,theta # in 2D/3D respectively with theta being the polar angle. `depth' will # create a function, in which only the first parameter is non-zero, which # is interpreted to be the depth of the point. set Coordinate system = cartesian # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end end subsection Boundary temperature model # When the temperature is fixed on a given boundary as determined by the # list of 'Fixed temperature boundary indicators', there might be parts of # the boundary where material flows out and one may want to prescribe the # temperature only on the parts of the boundary where there is inflow. This # parameter determines if temperatures are only prescribed at these inflow # parts of the boundary (if false) or everywhere on a given boundary, # independent of the flow direction (if true).Note that in this context, # `fixed' refers to the fact that these are the boundary indicators where # Dirichlet boundary conditions are applied, and does not imply that the # boundary temperature is time-independent. # # Mathematically speaking, the temperature satisfies an advection-diffusion # equation. 
For this type of equation, one can prescribe the temperature # even on outflow boundaries as long as the diffusion coefficient is # nonzero. This would correspond to the ``true'' setting of this parameter, # which is correspondingly the default. In practice, however, this would # only make physical sense if the diffusion coefficient is actually quite # large to prevent the creation of a boundary layer. In addition, if there # is no diffusion, one can only impose Dirichlet boundary conditions (i.e., # prescribe a fixed temperature value at the boundary) at those boundaries # where material flows in. This would correspond to the ``false'' setting of # this parameter. set Allow fixed temperature on outflow boundaries = true # A comma separated list of names denoting those boundaries on which the # temperature is fixed and described by the boundary temperature object # selected in the 'List of model names' parameter. All boundary indicators # used by the geometry but not explicitly listed here will end up with # no-flux (insulating) boundary conditions, or, if they are listed in the # 'Fixed heat flux boundary indicators', with Neumann boundary conditions. # # The names of the boundaries listed here can either be numbers (in which # case they correspond to the numerical boundary indicators assigned by the # geometry object), or they can correspond to any of the symbolic names the # geometry object may have provided for each part of the boundary. You may # want to compare this with the documentation of the geometry model you use # in your model. # # This parameter only describes which boundaries have a fixed temperature, # but not what temperature should hold on these boundaries. The latter piece # of information needs to be implemented in a plugin in the # BoundaryTemperature group, unless an existing implementation in this group # already provides what you want. set Fixed temperature boundary indicators = top, bottom # default: # A comma-separated list of boundary temperature models that will be used to # initialize the temperature. These plugins are loaded in the order given, # and modify the existing temperature field via the operators listed in # 'List of model operators'. # # The following boundary temperature models are available: # # `ascii data': Implementation of a model in which the boundary data is # derived from files containing data in ascii format. Note the required # format of the input data: The first lines may contain any number of # comments if they begin with `#', but one of these lines needs to contain # the number of grid points in each dimension as for example `# POINTS: 3 # 3'. The order of the data columns has to be `x', `Temperature [K]' in a 2d # model and `x', `y', `Temperature [K]' in a 3d model, which means that # there has to be a single column containing the temperature. Note that the # data in the input files need to be sorted in a specific order: the first # coordinate needs to ascend first, followed by the second in order to # assign the correct data to the prescribed coordinates. If you use a # spherical model, then the assumed grid changes. `x' will be replaced by # the radial distance of the point to the bottom of the model, `y' by the # azimuth angle and `z' by the polar angle measured positive from the north # pole. The grid will be assumed to be a latitude-longitude grid. Note that # the order of spherical coordinates is `r', `phi', `theta' and not `r', # `theta', `phi', since this allows for dimension independent expressions. 
# # `box': A model in which the temperature is chosen constant on the sides of # a box which are selected by the parameters # Left/Right/Top/Bottom/Front/Back temperature # # `box with lithosphere boundary indicators': A model in which the # temperature is chosen constant on all the sides of a box. Additional # boundary indicators are added to the lithospheric parts of the vertical # boundaries. This model is to be used with the 'Two Merged Boxes' Geometry # Model. # # `constant': A model in which the temperature is chosen constant on a given # boundary indicator. Parameters are read from the subsection 'Constant'. # # `dynamic core': This is a boundary temperature model working only with # spherical shell geometry and core statistics postprocessor. The # temperature at the top is constant, and the core mantle boundary # temperature is dynamically evolving through time by calculating the heat # flux into the core and solving the core energy balance. The formulation is # mainly following \cite{NPB+04}, and the plugin is used in Zhang et al. # [2016]. The energy of core cooling and freeing of the inner core is # included in the plugin. However, current plugin can not deal with the # energy balance if the core is in the `snowing core' regime (i.e., the core # solidifies from the top instead of bottom). # # `function': Implementation of a model in which the boundary temperature is # given in terms of an explicit formula that is elaborated in the parameters # in section ``Boundary temperature model|Function''. # # Since the symbol $t$ indicating time may appear in the formulas for the # prescribed temperatures, it is interpreted as having units seconds unless # the global input parameter ``Use years in output instead of seconds'' is # set, in which case we interpret the formula expressions as having units # year. # # Because this class simply takes what the function calculates, this class # can not know certain pieces of information such as the minimal and maximal # temperature on the boundary. For operations that require this, for example # in post-processing, this boundary temperature model must therefore be told # what the minimal and maximal values on the boundary are. This is done # using parameters set in section ``Boundary temperature model/Initial # temperature''. # # The format of these functions follows the syntax understood by the # muparser library, see Section~\ref{sec:muparser-format}. # # `initial temperature': A model in which the temperature at the boundary is # chosen to be the same as given in the initial conditions. # # Because this class simply takes what the initial temperature had # described, this class can not know certain pieces of information such as # the minimal and maximal temperature on the boundary. For operations that # require this, for example in post-processing, this boundary temperature # model must therefore be told what the minimal and maximal values on the # boundary are. This is done using parameters set in section ``Boundary # temperature model/Initial temperature''. # # `spherical constant': A model in which the temperature is chosen constant # on the inner and outer boundaries of a spherical shell, ellipsoidal chunk # or chunk. Parameters are read from subsection 'Spherical constant'. set List of model names = # A comma-separated list of operators that will be used to append the listed # temperature models onto the previous models. If only one operator is # given, the same operator is applied to all models. 
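# As an illustration (boundary names and values are examples only), constant
# temperatures on the top and bottom boundaries of a box geometry could be
# prescribed with
#
#   set Fixed temperature boundary indicators = top, bottom
#   set List of model names                   = box
#   subsection Box
#     set Bottom temperature = 1.
#     set Top temperature    = 0.
#   end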
set List of model operators = add # Select one of the following models: # # `ascii data': Implementation of a model in which the boundary data is # derived from files containing data in ascii format. Note the required # format of the input data: The first lines may contain any number of # comments if they begin with `#', but one of these lines needs to contain # the number of grid points in each dimension as for example `# POINTS: 3 # 3'. The order of the data columns has to be `x', `Temperature [K]' in a 2d # model and `x', `y', `Temperature [K]' in a 3d model, which means that # there has to be a single column containing the temperature. Note that the # data in the input files need to be sorted in a specific order: the first # coordinate needs to ascend first, followed by the second in order to # assign the correct data to the prescribed coordinates. If you use a # spherical model, then the assumed grid changes. `x' will be replaced by # the radial distance of the point to the bottom of the model, `y' by the # azimuth angle and `z' by the polar angle measured positive from the north # pole. The grid will be assumed to be a latitude-longitude grid. Note that # the order of spherical coordinates is `r', `phi', `theta' and not `r', # `theta', `phi', since this allows for dimension independent expressions. # # `box': A model in which the temperature is chosen constant on the sides of # a box which are selected by the parameters # Left/Right/Top/Bottom/Front/Back temperature # # `box with lithosphere boundary indicators': A model in which the # temperature is chosen constant on all the sides of a box. Additional # boundary indicators are added to the lithospheric parts of the vertical # boundaries. This model is to be used with the 'Two Merged Boxes' Geometry # Model. # # `constant': A model in which the temperature is chosen constant on a given # boundary indicator. Parameters are read from the subsection 'Constant'. # # `dynamic core': This is a boundary temperature model working only with # spherical shell geometry and core statistics postprocessor. The # temperature at the top is constant, and the core mantle boundary # temperature is dynamically evolving through time by calculating the heat # flux into the core and solving the core energy balance. The formulation is # mainly following \cite{NPB+04}, and the plugin is used in Zhang et al. # [2016]. The energy of core cooling and freeing of the inner core is # included in the plugin. However, current plugin can not deal with the # energy balance if the core is in the `snowing core' regime (i.e., the core # solidifies from the top instead of bottom). # # `function': Implementation of a model in which the boundary temperature is # given in terms of an explicit formula that is elaborated in the parameters # in section ``Boundary temperature model|Function''. # # Since the symbol $t$ indicating time may appear in the formulas for the # prescribed temperatures, it is interpreted as having units seconds unless # the global input parameter ``Use years in output instead of seconds'' is # set, in which case we interpret the formula expressions as having units # year. # # Because this class simply takes what the function calculates, this class # can not know certain pieces of information such as the minimal and maximal # temperature on the boundary. For operations that require this, for example # in post-processing, this boundary temperature model must therefore be told # what the minimal and maximal values on the boundary are. 
This is done # using parameters set in section ``Boundary temperature model/Initial # temperature''. # # The format of these functions follows the syntax understood by the # muparser library, see Section~\ref{sec:muparser-format}. # # `initial temperature': A model in which the temperature at the boundary is # chosen to be the same as given in the initial conditions. # # Because this class simply takes what the initial temperature had # described, this class can not know certain pieces of information such as # the minimal and maximal temperature on the boundary. For operations that # require this, for example in post-processing, this boundary temperature # model must therefore be told what the minimal and maximal values on the # boundary are. This is done using parameters set in section ``Boundary # temperature model/Initial temperature''. # # `spherical constant': A model in which the temperature is chosen constant # on the inner and outer boundaries of a spherical shell, ellipsoidal chunk # or chunk. Parameters are read from subsection 'Spherical constant'. # # \textbf{Warning}: This parameter provides an old and deprecated way of # specifying boundary temperature models and shouldn't be used. Please use # 'List of model names' instead. set Model name = spherical constant # default: unspecified subsection Ascii data model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/boundary-temperature/ascii-data/test/ # The file name of the model data. Provide file in format: (Velocity file # name).\%s\%d where \%s is a string specifying the boundary of the model # according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = box_2d_%s.%d.txt # Time step between following data files. Depending on the setting of the # global `Use years in output instead of seconds' flag in the input file, # this number is either interpreted as seconds or as years. The default is # one million, i.e., either one million seconds or one million years. set Data file time step = 1e6 # In some cases the boundary files are not numbered in increasing but in # decreasing order (e.g. `Ma BP'). If this flag is set to `True' the # plugin will first load the file with the number `First data file number' # and decrease the file number during the model run. set Decreasing file order = false # Time from which on the data file with number `First data file number' is # used as boundary condition. Until this time, a boundary condition equal # to zero everywhere is assumed. Depending on the setting of the global # `Use years in output instead of seconds' flag in the input file, this # number is either interpreted as seconds or as years. set First data file model time = 0 # Number of the first velocity file to be loaded when the model time is # larger than `First velocity file model time'. set First data file number = 0 # Scalar factor, which is applied to the model data. You might want to use # this to scale the input to a reference model. 
# Another way to use this factor is to convert units of the input files. For
# instance, if you provide velocities in cm/yr, set this factor to 0.01.
set Scale factor = 1.
end

subsection Box
# Temperature at the bottom boundary (at minimal $z$-value). Units:
# $\si{K}$.
set Bottom temperature = 0.

# Temperature at the left boundary (at minimal $x$-value). Units:
# $\si{K}$.
set Left temperature = 1.

# Temperature at the right boundary (at maximal $x$-value). Units:
# $\si{K}$.
set Right temperature = 0.

# Temperature at the top boundary (at maximal $z$-value). Units:
# $\si{K}$.
set Top temperature = 0.
end

subsection Box with lithosphere boundary indicators
# Temperature at the bottom boundary (at minimal $z$-value). Units:
# $\si{K}$.
set Bottom temperature = 0.

# Temperature at the left boundary (at minimal $x$-value). Units:
# $\si{K}$.
set Left temperature = 1.

# Temperature at the additional left lithosphere boundary (specified by
# the user in the Geometry Model). Units: $\si{K}$.
set Left temperature lithosphere = 0.

# Temperature at the right boundary (at maximal $x$-value). Units:
# $\si{K}$.
set Right temperature = 0.

# Temperature at the additional right lithosphere boundary (specified by
# the user in the Geometry Model). Units: $\si{K}$.
set Right temperature lithosphere = 0.

# Temperature at the top boundary (at maximal $z$-value). Units:
# $\si{K}$.
set Top temperature = 0.
end

subsection Constant
# A comma separated list of mappings between boundary indicators and the
# temperature associated with the boundary indicators. The format for this
# list is ``indicator1 : value1, indicator2 : value2, ...'', where each
# indicator is a valid boundary indicator (either a number or the symbolic
# name of a boundary as provided by the geometry model) and each value is
# the temperature of that boundary.
set Boundary indicator to temperature mappings =
end

subsection Dynamic core
# Core thermal expansivity. Units: $1/K$.
set Alpha = 1.35e-5

# Compositional expansion coefficient $\beta_c$. See \cite{NPB+04} for more
# details.
set Beta composition = 1.1

# Pressure at the CMB. Units: $Pa$.
set CMB pressure = 0.14e12

# Core heat conductivity $k_c$. Units: $W/m/K$.
set Core conductivity = 60.

# Density of the core. Units: $kg/m^3$.
set Core density = 12.5e3

# Heat capacity of the core. Units: $J/kg/K$.
set Core heat capacity = 840.

# Partition coefficient of the light element.
set Delta = 0.5

# Gravitational acceleration at the CMB. Units: $m/s^2$.
set Gravity acceleration = 9.8

# Initial light composition (e.g. S, O) concentration in weight fraction.
set Initial light composition = 0.01

# Temperature at the inner boundary (core mantle boundary) at the
# beginning. Units: $\si{K}$.
set Inner temperature = 6000.

# Core compressibility at zero pressure. See \cite{NPB+04} for more
# details.
set K0 = 4.111e11

# The latent heat of core freezing. Units: $J/kg$.
set Lh = 750e3

# The maximum number of iterations for the nonlinear core energy solver.
set Max iteration = 30000

# Temperature at the outer boundary (lithosphere water/air). Units:
# $\si{K}$.
set Outer temperature = 0.

# The heat of reaction. Units: $J/kg$.
set Rh = -27.7e6

# Core density at zero pressure. Units: $kg/m^3$. See \cite{NPB+04} for
# more details.
set Rho0 = 7.019e3

# Initial rate of change of the inner core radius. Units: $km/year$.
set dR over dt = 0.

# Initial rate of change of the CMB temperature. Units: $K/year$.
set dT over dt = 0.

# Initial rate of change of the light composition. Units: $1/year$.
set dX over dt = 0.
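#
# As a rough orientation only (this is a schematic summary, not the exact
# set of equations implemented by the plugin): following \cite{NPB+04}, the
# core energy balance equates the heat flux through the CMB with the sum of
# secular cooling, latent heat and gravitational energy released by inner
# core growth, and radiogenic (or other) heating,
#   $Q_{\mathrm{CMB}} \approx Q_{\mathrm{secular}} + Q_{\mathrm{latent}}
#     + Q_{\mathrm{gravitational}} + Q_{\mathrm{radiogenic}}$,
# which, together with the melting curve defined below, determines how the
# CMB temperature and the inner core radius evolve in time.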
subsection Geotherm parameters
# Whether the melting curve depends on composition.
set Composition dependency = true

# Melting curve (\cite{NPB+04} eq. (40)) parameter Theta.
set Theta = 0.11

# Melting curve (\cite{NPB+04} eq. (40)) parameter Tm0. Units: $\si{K}$.
set Tm0 = 1695.

# Melting curve (\cite{NPB+04} eq. (40)) parameter Tm1. Units: $1/TPa$.
set Tm1 = 10.9

# Melting curve (\cite{NPB+04} eq. (40)) parameter Tm2. Units: $1/TPa^2$.
set Tm2 = -8.0

# Whether to use the Fe-FeS system solidus from Buono \& Walker (2011)
# instead.
set Use BW11 = false
end

subsection Other energy source
# Data file name for an additional energy source into the core. The `other
# energy source' is meant for an external core energy source, for example
# if one wants to model an early lunar core powered by precession (Dwyer,
# C. A., et al. (2011). A long-lived lunar dynamo driven by continuous
# mechanical stirring. Nature 479(7372): 212-214). Format: [Time(Gyr)
# Energy rate(W)].
set File name =
end

subsection Radioactive heat source
# Half decay times of the different elements (Ga).
set Half life times =

# Heating rates of the different elements (W/kg).
set Heating rates =

# Initial concentrations of the different elements (ppm).
set Initial concentrations =

# Number of different radioactive heating elements in the core.
set Number of radioactive heating elements = 0
end
end

subsection Function
# A selection that determines the assumed coordinate system for the
# function variables. Allowed values are `cartesian', `spherical', and
# `depth'. `spherical' coordinates are interpreted as r,phi or r,phi,theta
# in 2D/3D respectively with theta being the polar angle. `depth' will
# create a function, in which only the first parameter is non-zero, which
# is interpreted to be the depth of the point.
set Coordinate system = cartesian

# Sometimes it is convenient to use symbolic constants in the expression
# that describes the function, rather than having to use its numeric value
# everywhere the constant appears. These values can be defined using this
# parameter, in the form `var1=value1, var2=value2, ...'.
#
# A typical example would be to set this runtime parameter to
# `pi=3.1415926536' and then use `pi' in the expression of the actual
# formula. (That said, for convenience this class actually defines both
# `pi' and `Pi' by default, but you get the idea.)
set Function constants =

# The formula that denotes the function you want to evaluate for
# particular values of the independent variables. This expression may
# contain any of the usual operations such as addition or multiplication,
# as well as all of the common functions such as `sin' or `cos'. In
# addition, it may contain expressions like `if(x>0, 1, -1)' where the
# expression evaluates to the second argument if the first argument is
# true, and to the third argument otherwise. For a full overview of
# possible expressions accepted see the documentation of the muparser
# library at http://muparser.beltoforion.de/.
#
# If the function you are describing represents a vector-valued function
# with multiple components, then separate the expressions for individual
# components by a semicolon.
set Function expression = 0

# Maximal temperature. Units: $\si{K}$.
set Maximal temperature = 3773.

# Minimal temperature. Units: $\si{K}$.
set Minimal temperature = 273.

# The names of the variables as they will be used in the function,
# separated by commas.
By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection Initial temperature # Maximal temperature. Units: $\si{K}$. set Maximal temperature = 3773. # Minimal temperature. Units: $\si{K}$. set Minimal temperature = 0. end subsection Spherical constant # Temperature at the inner boundary (core mantle boundary). Units: # $\si{K}$. set Inner temperature = 2000.0 # default: 6000. # Temperature at the outer boundary (lithosphere water/air). Units: # $\si{K}$. set Outer temperature = 290.0 # default: 0. end end subsection Boundary traction model # A comma separated list denoting those boundaries on which a traction force # is prescribed, i.e., where known external forces act, resulting in an # unknown velocity. This is often used to model ``open'' boundaries where we # only know the pressure. This pressure then produces a force that is normal # to the boundary and proportional to the pressure. # # The format of valid entries for this parameter is that of a map given as # ``key1 [selector]: value1, key2 [selector]: value2, key3: value3, ...'' # where each key must be a valid boundary indicator (which is either an # integer or the symbolic name the geometry model in use may have provided # for this part of the boundary) and each value must be one of the currently # implemented boundary traction models. ``selector'' is an optional string # given as a subset of the letters `xyz' that allows you to apply the # boundary conditions only to the components listed. As an example, '1 y: # function' applies the type `function' to the y component on boundary 1. # Without a selector it will affect all components of the traction. set Prescribed traction boundary indicators = subsection Ascii data model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/boundary-traction/ascii-data/test/ # The file name of the model data. Provide file in format: (Velocity file # name).\%s\%d where \%s is a string specifying the boundary of the model # according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = box_2d_%s.%d.txt # Time step between following data files. Depending on the setting of the # global `Use years in output instead of seconds' flag in the input file, # this number is either interpreted as seconds or as years. The default is # one million, i.e., either one million seconds or one million years. 
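#
# As an illustration only (hypothetical values): with `Data file name =
# box_2d_%s.%d.txt', `First data file number = 0' and `Data file time step
# = 1e6', the data for the boundary named `top' at the start of the run is
# read from `box_2d_top.0.txt', while `box_2d_top.1.txt' corresponds to a
# model time of one million years (or seconds, depending on the global
# `Use years in output instead of seconds' flag).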
set Data file time step = 1e6 # In some cases the boundary files are not numbered in increasing but in # decreasing order (e.g. `Ma BP'). If this flag is set to `True' the # plugin will first load the file with the number `First data file number' # and decrease the file number during the model run. set Decreasing file order = false # Time from which on the data file with number `First data file number' is # used as boundary condition. Until this time, a boundary condition equal # to zero everywhere is assumed. Depending on the setting of the global # `Use years in output instead of seconds' flag in the input file, this # number is either interpreted as seconds or as years. set First data file model time = 0 # Number of the first velocity file to be loaded when the model time is # larger than `First velocity file model time'. set First data file number = 0 # Scalar factor, which is applied to the model data. You might want to use # this to scale the input to a reference model. Another way to use this # factor is to convert units of the input files. For instance, if you # provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end subsection Function # A selection that determines the assumed coordinate system for the # function variables. Allowed values are `cartesian', `spherical', and # `depth'. `spherical' coordinates are interpreted as r,phi or r,phi,theta # in 2D/3D respectively with theta being the polar angle. `depth' will # create a function, in which only the first parameter is non-zero, which # is interpreted to be the depth of the point. set Coordinate system = cartesian # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0; 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. 
However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection Initial lithostatic pressure # The number of integration points over which we integrate the lithostatic # pressure downwards. set Number of integration points = 1000 # The point where the pressure profile will be calculated. Cartesian # coordinates when geometry is a box, otherwise enter radius, longitude, # and in 3D latitude.Units: $\si{m}$ or degrees. set Representative point = end end subsection Boundary velocity model # A comma separated list denoting those boundaries on which the velocity is # prescribed, i.e., where unknown external forces act to prescribe a # particular velocity. This is often used to prescribe a velocity that # equals that of overlying plates. # # The format of valid entries for this parameter is that of a map given as # ``key1 [selector]: value1, key2 [selector]: value2, key3: value3, ...'' # where each key must be a valid boundary indicator (which is either an # integer or the symbolic name the geometry model in use may have provided # for this part of the boundary) and each value must be one of the currently # implemented boundary velocity models. ``selector'' is an optional string # given as a subset of the letters `xyz' that allows you to apply the # boundary conditions only to the components listed. As an example, '1 y: # function' applies the type `function' to the y component on boundary 1. # Without a selector it will affect all components of the velocity. # # Note that the no-slip boundary condition is a special case of the current # one where the prescribed velocity happens to be zero. It can thus be # implemented by indicating that a particular boundary is part of the ones # selected using the current parameter and using ``zero velocity'' as the # boundary values. Alternatively, you can simply list the part of the # boundary on which the velocity is to be zero with the parameter ``Zero # velocity boundary indicator'' in the current parameter section. # # Note that when ``Use years in output instead of seconds'' is set to true, # velocity should be given in m/yr. The following boundary velocity models # are available: # # `ascii data': Implementation of a model in which the boundary velocity is # derived from files containing data in ascii format. Note the required # format of the input data: The first lines may contain any number of # comments if they begin with `#', but one of these lines needs to contain # the number of grid points in each dimension as for example `# POINTS: 3 # 3'. The order of the data columns has to be `x', `velocity${}_x$', # `velocity${}_y$' in a 2d model or `x', `y', `velocity${}_x$', # `velocity${}_y$', `velocity${}_z$' in a 3d model. Note that the data in # the input files need to be sorted in a specific order: the first # coordinate needs to ascend first, followed by the second in order to # assign the correct data to the prescribed coordinates.If you use a # spherical model, then the assumed grid changes. `x' will be replaced by # the radial distance of the point to the bottom of the model, `y' by the # azimuth angle and `z' by the polar angle measured positive from the north # pole. The grid will be assumed to be a latitude-longitude grid. 
Note that # the order of spherical coordinates is `r', `phi', `theta' and not `r', # `theta', `phi', since this allows for dimension independent expressions. # Velocities can be specified using cartesian (by default) or spherical unit # vectors. No matter which geometry model is chosen, the unit of the # velocities is assumed to be m/s or m/yr depending on the `Use years in # output instead of seconds' flag. If you provide velocities in cm/yr, set # the `Scale factor' option to 0.01. # # `function': Implementation of a model in which the boundary velocity is # given in terms of an explicit formula that is elaborated in the parameters # in section ``Boundary velocity model|Function''. The format of these # functions follows the syntax understood by the muparser library, see # Section~\ref{sec:muparser-format}. # # The formula you describe in the mentioned section is a semicolon separated # list of velocities for each of the $d$ components of the velocity vector. # These $d$ formulas are interpreted as having units m/s, unless the global # input parameter ``Use years in output instead of seconds'' is set, in # which case we interpret the formula expressions as having units m/year. # # Likewise, since the symbol $t$ indicating time may appear in the formulas # for the prescribed velocities, it is interpreted as having units seconds # unless the global parameter above has been set. # # `gplates': Implementation of a model in which the boundary velocity is # derived from files that are generated by the GPlates program. # # `zero velocity': Implementation of a model in which the boundary velocity # is zero. This is commonly referred to as a ``stick boundary condition'', # indicating that the material ``sticks'' to the material on the other side # of the boundary. set Prescribed velocity boundary indicators = # A comma separated list of names denoting those boundaries on which the # velocity is tangential and unrestrained, i.e., free-slip where no external # forces act to prescribe a particular tangential velocity (although there # is a force that requires the flow to be tangential). # # The names of the boundaries listed here can either by numbers (in which # case they correspond to the numerical boundary indicators assigned by the # geometry object), or they can correspond to any of the symbolic names the # geometry object may have provided for each part of the boundary. You may # want to compare this with the documentation of the geometry model you use # in your model. set Tangential velocity boundary indicators = top # default: # A comma separated list of names denoting those boundaries on which the # velocity is zero. # # The names of the boundaries listed here can either by numbers (in which # case they correspond to the numerical boundary indicators assigned by the # geometry object), or they can correspond to any of the symbolic names the # geometry object may have provided for each part of the boundary. You may # want to compare this with the documentation of the geometry model you use # in your model. set Zero velocity boundary indicators = bottom # default: subsection Ascii data model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. 
This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/boundary-velocity/ascii-data/test/ # The file name of the model data. Provide file in format: (Velocity file # name).\%s\%d where \%s is a string specifying the boundary of the model # according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = box_2d_%s.%d.txt # Time step between following data files. Depending on the setting of the # global `Use years in output instead of seconds' flag in the input file, # this number is either interpreted as seconds or as years. The default is # one million, i.e., either one million seconds or one million years. set Data file time step = 1e6 # In some cases the boundary files are not numbered in increasing but in # decreasing order (e.g. `Ma BP'). If this flag is set to `True' the # plugin will first load the file with the number `First data file number' # and decrease the file number during the model run. set Decreasing file order = false # Time from which on the data file with number `First data file number' is # used as boundary condition. Until this time, a boundary condition equal # to zero everywhere is assumed. Depending on the setting of the global # `Use years in output instead of seconds' flag in the input file, this # number is either interpreted as seconds or as years. set First data file model time = 0 # Number of the first velocity file to be loaded when the model time is # larger than `First velocity file model time'. set First data file number = 0 # Scalar factor, which is applied to the model data. You might want to use # this to scale the input to a reference model. Another way to use this # factor is to convert units of the input files. For instance, if you # provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. # Specify velocity as r, phi, and theta components instead of x, y, and z. # Positive velocities point up, east, and north (in 3D) or out and # clockwise (in 2D). This setting only makes sense for spherical # geometries. set Use spherical unit vectors = false end subsection Function # A selection that determines the assumed coordinate system for the # function variables. Allowed values are `cartesian', `spherical', and # `depth'. `spherical' coordinates are interpreted as r,phi or r,phi,theta # in 2D/3D respectively with theta being the polar angle. `depth' will # create a function, in which only the first parameter is non-zero, which # is interpreted to be the depth of the point. set Coordinate system = cartesian # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. 
This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0; 0 # Specify velocity as r, phi, and theta components instead of x, y, and z. # Positive velocities point up, east, and north (in 3D) or out and # clockwise (in 2D). This setting only makes sense for spherical # geometries. set Use spherical unit vectors = false # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection GPlates model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a '/') or relative to the current # directory. The path may also include the special text # '$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/boundary-velocity/gplates/ # Time step between following velocity files. Depending on the setting of # the global 'Use years in output instead of seconds' flag in the input # file, this number is either interpreted as seconds or as years. The # default is one million, i.e., either one million seconds or one million # years. set Data file time step = 1e6 # In some cases the boundary files are not numbered in increasing but in # decreasing order (e.g. 'Ma BP'). If this flag is set to 'True' the # plugin will first load the file with the number 'First velocity file # number' and decrease the file number during the model run. set Decreasing file order = false # Time from which on the velocity file with number 'First velocity file # number' is used as boundary condition. Previous to this time, a no-slip # boundary condition is assumed. Depending on the setting of the global # 'Use years in output instead of seconds' flag in the input file, this # number is either interpreted as seconds or as years. set First data file model time = 0. # Number of the first velocity file to be loaded when the model time is # larger than 'First velocity file model time'. 
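#
# As an illustration only: with the default `Velocity file name = phi.%d'
# and `First data file number = 0', the `%d' placeholder is replaced by the
# current file number, so the run starts from file number 0 and advances to
# the next number after every `Data file time step' (or counts downwards if
# `Decreasing file order' is set).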
set First data file number = 0 # Determines the depth of the lithosphere, so that the GPlates velocities # can be applied at the sides of the model as well as at the surface. set Lithosphere thickness = 100000. # Point that determines the plane in which a 2D model lies in. Has to be # in the format `a,b' where a and b are theta (polar angle) and phi in # radians. set Point one = 1.570796,0.0 # Point that determines the plane in which a 2D model lies in. Has to be # in the format `a,b' where a and b are theta (polar angle) and phi in # radians. set Point two = 1.570796,1.570796 # Scalar factor, which is applied to the boundary velocity. You might want # to use this to scale the velocities to a reference model (e.g. with # free-slip boundary) or another plate reconstruction. set Scale factor = 1. # The file name of the material data. Provide file in format: (Velocity # file name).\%d.gpml where \%d is any sprintf integer qualifier, # specifying the format of the current file number. set Velocity file name = phi.%d end end subsection Checkpointing # The number of timesteps between performing checkpoints. If 0 and time # between checkpoint is not specified, checkpointing will not be performed. # Units: None. set Steps between checkpoint = 50 # default: 0 # The wall time between performing checkpoints. If 0, will use the # checkpoint step frequency instead. Units: Seconds. set Time between checkpoint = 0 end subsection Compositional fields # A comma separated list denoting the solution method of each compositional # field. Each entry of the list must be one of the currently implemented # field types. # # These choices correspond to the following methods by which compositional # fields gain their values:\begin{itemize}\item ``field'': If a # compositional field is marked with this method, then its values are # computed in each time step by advecting along the values of the previous # time step using the velocity field, and applying reaction rates to it. In # other words, this corresponds to the usual notion of a composition field # as mentioned in Section~\ref{sec:compositional}. # \item ``particles'': If a compositional field is marked with this method, # then its values are obtained in each time step by interpolating the # corresponding properties from the particles located on each cell. The time # evolution therefore happens because particles move along with the velocity # field, and particle properties can react with each other as well. See # Section~\ref{sec:particles} for more information about how particles # behave. # \item ``volume of fluid``: If a compositional field is marked with this # method, then its values are obtained in each timestep by reconstructing a # polynomial finite element approximation on each cell from a volume of # fluid interface tracking method, which is used to compute the advection # updates. # \item ``static'': If a compositional field is marked this way, then it # does not evolve at all. Its values are simply set to the initial # conditions, and will then never change. # \item ``melt field'': If a compositional field is marked with this method, # then its values are computed in each time step by advecting along the # values of the previous time step using the melt velocity, and applying # reaction rates to it. In other words, this corresponds to the usual notion # of a composition field as mentioned in Section~\ref{sec:compositional}, # except that it is advected with the melt velocity instead of the solid # velocity. 
This method can only be chosen if melt transport is active in # the model. # \item ``prescribed field'': The value of these fields is determined in # each time step from the material model. If a compositional field is marked # with this method, then the value of a specific additional material model # output, called the `PrescribedFieldOutputs' is interpolated onto the # field. This field does not change otherwise, it is not advected with the # flow. # \item ``prescribed field with diffusion'': If a compositional field is # marked this way, the value of a specific additional material model output, # called the `PrescribedFieldOutputs' is interpolated onto the field, as in # the ``prescribed field'' method. Afterwards, the field is diffused based # on a solver parameter, the diffusion length scale, smoothing the field. # Specifically, the field is updated by solving the equation $(I-l^2 \Delta) # C_\text{smoothed} = C_\text{prescribed}$, where $l$ is the diffusion # length scale. Note that this means that the amount of diffusion is # independent of the time step size, and that the field is not advected with # the flow.\end{itemize} set Compositional field methods = # A list of integers smaller than or equal to the number of compositional # fields. All compositional fields in this list will be normalized before # the first timestep. The normalization is implemented in the following way: # First, the sum of the fields to be normalized is calculated at every point # and the global maximum is determined. Second, the compositional fields to # be normalized are divided by this maximum. set List of normalized fields = # A comma separated list denoting the particle properties that will be # projected to those compositional fields that are of the ``particles'' # field type. # # The format of valid entries for this parameter is that of a map given as # ``key1: value1, key2: value2 [component2], key3: value3 [component4], # ...'' where each key must be a valid field name of the ``particles'' type, # and each value must be one of the currently selected particle properties. # Component is a component index of the particle property that is 0 by # default, but can be set up to n-1, where n is the number of vector # components of this particle property. The component indicator only needs # to be set if not the first component of the particle property should be # mapped (e.g. the $y$-component of the velocity at the particle # positions). set Mapped particle properties = # A user-defined name for each of the compositional fields requested. set Names of fields = # The number of fields that will be advected along with the flow field, # excluding velocity, pressure and temperature. set Number of fields = 0 end subsection Discretization # The polynomial degree to use for the composition variable(s). As an # example, a value of 2 for this parameter will yield either the element # $Q_2$ or $DGQ_2$ for the compositional field(s), depending on whether we # use continuous or discontinuous field(s). # # For continuous elements, the value needs to be 1 or larger as $Q_1$ is the # lowest order element, while $DGQ_0$ is a valid choice. Units: None. set Composition polynomial degree = 2 # The polynomial degree to use for the velocity variables in the Stokes # system. The polynomial degree for the pressure variable will then be one # less in order to make the velocity/pressure pair conform with the usual # LBB (Babu{\v s}ka-Brezzi) condition. 
# In other words, we are using a Taylor-Hood element for the Stokes
# equations and this parameter indicates the polynomial degree of it. As an
# example, a value of 2 for this parameter will yield the element $Q_2^d
# \times Q_1$ for the $d$ velocity components and the pressure,
# respectively (unless the `Use locally conservative discretization'
# parameter is set, which modifies the pressure element).
#
# Be careful if you choose 1 as the degree. The resulting element is not
# stable and it may lead to artifacts in the solution. Units: None.
set Stokes velocity polynomial degree = 2

# The polynomial degree to use for the temperature variable. As an example,
# a value of 2 for this parameter will yield either the element $Q_2$ or
# $DGQ_2$ for the temperature field, depending on whether we use a
# continuous or discontinuous field. Units: None.
set Temperature polynomial degree = 2

# Whether to use a composition discretization that is discontinuous as
# opposed to continuous. This then requires the assembly of face terms
# between cells, and weak imposition of boundary terms for the composition
# field via the discontinuous Galerkin method.
set Use discontinuous composition discretization = false

# Whether to use a temperature discretization that is discontinuous as
# opposed to continuous. This then requires the assembly of face terms
# between cells, and weak imposition of boundary terms for the temperature
# field via the interior-penalty discontinuous Galerkin method.
set Use discontinuous temperature discretization = false

# By default (i.e., when this parameter is set to its default value `false')
# \aspect{} uses finite element combinations in which the pressure shape
# functions are polynomials one degree lower than the shape functions for
# the velocity. An example is the Taylor-Hood element that uses $Q_k$
# elements for the velocity and $Q_{k-1}$ for the pressure. This is because
# using the \textit{same} polynomial degree for both the velocity and the
# pressure turns out to violate some mathematical properties necessary to
# make the problem solvable. (In particular, the condition in question goes
# by the name ``inf-sup'' or Babu{\v s}ka-Brezzi or LBB condition.) A
# consequence of violating this condition is that the pressure may show
# oscillations and not converge to the correct pressure.
#
# That said, people have often used $Q_1$ elements for both the velocity
# and pressure anyway. This is commonly referred to as using the Q1-Q1
# method. It is, by default, not stable as mentioned above, but it can be
# made stable by adding a small amount of compressibility to the model.
# There are numerous ways to do that. Today, the way that is generally
# considered to be the best approach is the one by Dohrmann and Bochev
# \cite{DohrmannBochev2004}.
#
# When this parameter is set to ``true'', then \aspect{} will use this
# method by using $Q_k \times Q_k$ elements for velocity and pressure,
# respectively, where $k$ is the value provided for the parameter ``Stokes
# velocity polynomial degree''.
#
# \note{While \aspect{} \textit{allows} you to use this method, it is
# generally understood that this is not a great idea as it leads to rather
# low accuracy in general. It also leads to substantial problems when
# using free surfaces. As a consequence, the presence of this parameter
# should not be seen as an endorsement of the method, or a suggestion to
# actually use it.
It simply makes the method available.} set Use equal order interpolation for Stokes = false # Whether to use a Stokes discretization that is locally conservative at the # expense of a larger number of degrees of freedom (true), or to go with a # cheaper discretization that does not locally conserve mass, although it is # globally conservative (false). # # When using a locally conservative discretization, the finite element space # for the pressure is discontinuous between cells and is the polynomial # space $P_{-(k-1)}$ of polynomials of degree $k-1$ in each variable # separately. Here, $k$ is the value given in the parameter ``Stokes # velocity polynomial degree'', and consequently the polynomial degree for # the pressure, $k-1$, is one lower than that for the velocity. # # As a consequence of choosing this element for the pressure rather than the # more commonly used $Q_{k-1}$ element that is continuous, it can be shown # that if the medium is considered incompressible then the computed discrete # velocity field $\mathbf u_h$ satisfies the property $\int_ {\partial K} # \mathbf u_h \cdot \mathbf n = 0$ for every cell $K$, i.e., for each cell # inflow and outflow exactly balance each other as one would expect for an # incompressible medium. In other words, the velocity field is # \textit{locally conservative}. # # On the other hand, if this parameter is set to ``false''(the default), # then the finite element space is chosen as $Q_{k-1}$. This choice does not # yield the local conservation property but has the advantage of requiring # fewer degrees of freedom. Furthermore, the error is generally smaller with # this choice. # # For an in-depth discussion of these issues and a quantitative evaluation # of the different choices, see \cite{KHB12}. set Use locally conservative discretization = false subsection Stabilization parameters # The value used to penalize discontinuities in the discontinuous Galerkin # method. This is used only for the temperature field, and not for the # composition field, as pure advection does not use the interior penalty # method. This is largely empirically decided -- it must be large enough # to ensure the bilinear form is coercive, but not so large as to penalize # discontinuity at all costs. set Discontinuous penalty = 10. # The maximum global composition values that will be used in the bound # preserving limiter for the discontinuous solutions from composition # advection fields. The number of the input 'Global composition maximum' # values separated by ',' has to be the same as the number of the # compositional fields set Global composition maximum = 1.7976931348623157e+308 # The minimum global composition value that will be used in the bound # preserving limiter for the discontinuous solutions from composition # advection fields. The number of the input 'Global composition minimum' # values separated by ',' has to be the same as the number of the # compositional fields set Global composition minimum = -1.7976931348623157e+308 # The maximum global temperature value that will be used in the bound # preserving limiter for the discontinuous solutions from temperature # advection fields. set Global temperature maximum = 1.7976931348623157e+308 # The minimum global temperature value that will be used in the bound # preserving limiter for the discontinuous solutions from temperature # advection fields. set Global temperature minimum = -1.7976931348623157e+308 # Select the method for stabilizing the advection equation. 
The original # method implemented is 'entropy viscosity' as described in \cite {KHB12}. # SUPG is currently experimental. set Stabilization method = SUPG # default: entropy viscosity # If set to false, the artificial viscosity of a cell is computed and is # computed on every cell separately as discussed in \cite{KHB12}. If set # to true, the maximum of the artificial viscosity in the cell as well as # the neighbors of the cell is computed and used instead. set Use artificial viscosity smoothing = false # Whether to apply the bound preserving limiter as a correction after # having the discontinuous composition solution. Currently we apply this # only to the compositional solution if the 'Global composition maximum' # and 'Global composition minimum' are already defined in the .prm file. # This limiter keeps the discontinuous solution in the range given by # Global composition maximum' and 'Global composition minimum'. set Use limiter for discontinuous composition solution = false # Whether to apply the bound preserving limiter as a correction after # computing the discontinuous temperature solution. Currently we apply # this only to the temperature solution if the 'Global temperature # maximum' and 'Global temperature minimum' are already defined in the # .prm file. This limiter keeps the discontinuous solution in the range # given by 'Global temperature maximum' and 'Global temperature minimum'. set Use limiter for discontinuous temperature solution = false # The exponent $\alpha$ in the entropy viscosity stabilization. Valid # options are 1 or 2. The recommended setting is 2. (This parameter does # not correspond to any variable in the 2012 paper by Kronbichler, Heister # and Bangerth that describes ASPECT, see \cite{KHB12}. Rather, the paper # always uses 2 as the exponent in the definition of the entropy, # following equation (15) of the paper. The full approach is discussed in # \cite{GPP11}.) Note that this is not the thermal expansion coefficient, # also commonly referred to as $\alpha$.Units: None. set alpha = 2 # The $\beta$ factor in the artificial viscosity stabilization. This # parameter controls the maximum dissipation of the entropy viscosity, # which is the part that only scales with the cell diameter and the # maximum velocity in the cell, but does not depend on the solution field # itself or its residual. An appropriate value for 2d is 0.052 and 0.78 # for 3d. (For historical reasons, the name used here is different from # the one used in the 2012 paper by Kronbichler, Heister and Bangerth that # describes ASPECT, see \cite{KHB12}. This parameter can be given as a # single value or as a list with as many entries as one plus the number of # compositional fields. In the former case all advection fields use the # same stabilization parameters, in the latter case each field # (temperature first, then all compositions) use individual parameters. # This can be useful to reduce the stabilization for the temperature, # which already has some physical diffusion. This parameter corresponds to # the factor $\alpha_{\text{max}}$ in the formulas following equation (15) # of the paper.) Units: None. set beta = 0.052 # The $c_R$ factor in the entropy viscosity stabilization. This parameter # controls the part of the entropy viscosity that depends on the solution # field itself and its residual in addition to the cell diameter and the # maximum velocity in the cell. 
This parameter can be given as a single # value or as a list with as many entries as one plus the number of # compositional fields. In the former case all advection fields use the # same stabilization parameters, in the latter case each field # (temperature first, then all compositions) use individual parameters. # This can be useful to reduce the stabilization for the temperature, # which already has some physical diffusion. (For historical reasons, the # name used here is different from the one used in the 2012 paper by # Kronbichler, Heister and Bangerth that describes ASPECT, see # \cite{KHB12}. This parameter corresponds to the factor $\alpha_E$ in the # formulas following equation (15) of the paper.) Units: None. set cR = 0.11 # The strain rate scaling factor in the artificial viscosity # stabilization. This parameter determines how much the strain rate (in # addition to the velocity) should influence the stabilization. (This # parameter does not correspond to any variable in the 2012 paper by # Kronbichler, Heister and Bangerth that describes ASPECT, see # \cite{KHB12}. Rather, the paper always uses 0, i.e. they specify the # maximum dissipation $\nu_h^\text{max}$ as $\nu_h^\text{max}\vert_K = # \alpha_{\text{max}} h_K \|\mathbf u\|_{\infty,K}$. Here, we use # $\|\lvert\mathbf u\rvert + \gamma h_K \lvert\varepsilon (\mathbf # u)\rvert\|_{\infty,K}$ instead of $\|\mathbf u\|_{\infty,K}$. Units: # None. set gamma = 0.0 end end subsection Formulation # Whether to ask the material model for additional terms for the right-hand # side of the Stokes equation. This feature is likely only used when # implementing force vectors for manufactured solution problems and requires # filling additional outputs of type AdditionalMaterialOutputsStokesRHS. set Enable additional Stokes RHS = false # Whether to include the additional elastic terms on the right-hand side of # the Stokes equation. set Enable elasticity = false # Whether to include additional terms on the right-hand side of the Stokes # equation to set a given compression term specified in the MaterialModel # output PrescribedPlasticDilation. set Enable prescribed dilation = false # Select a formulation for the basic equations. Different published # formulations are available in ASPECT (see the list of possible values for # this parameter in the manual for available options). Two ASPECT specific # options are # \begin{enumerate} # \item `isentropic compression': ASPECT's original formulation, using the # explicit compressible mass equation, and the full density for the # temperature equation. # \item `custom': A custom selection of `Mass conservation' and `Temperature # equation'. # \end{enumerate} # # \note{Warning: The `custom' option is implemented for advanced users that # want full control over the equations solved. It is possible to choose # inconsistent formulations and no error checking is performed on the # consistency of the resulting equations.} # # \note{The `anelastic liquid approximation' option here can also be used to # set up the `truncated anelastic liquid approximation' as long as this # option is chosen together with a material model that defines a density # that depends on temperature and depth and not on the pressure.} set Formulation = Boussinesq approximation # default: custom # Possible approximations for the density derivatives in the mass # conservation equation. Note that this parameter is only evaluated if # `Formulation' is set to `custom'. Other formulations ignore the value of # this parameter. 
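#
# As an illustration only (a hypothetical combination, not a
# recommendation; `incompressible' is assumed here to be among the allowed
# values of `Mass conservation'): a custom formulation that treats the mass
# equation as incompressible but uses a reference density profile in the
# temperature equation could be selected as
#   set Formulation          = custom
#   set Mass conservation    = incompressible
#   set Temperature equation = reference density profile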
set Mass conservation = ask material model # Possible approximations for the density in the temperature equation. # Possible approximations are `real density' and `reference density # profile'. Note that this parameter is only evaluated if `Formulation' is # set to `custom'. Other formulations ignore the value of this parameter. set Temperature equation = real density end subsection Geometry model # Select one of the following models: # # `box': A box geometry parallel to the coordinate directions. The extent of # the box in each coordinate direction is set in the parameter file. The box # geometry labels its 2*dim sides as follows: in 2d, boundary indicators 0 # through 3 denote the left, right, bottom and top boundaries; in 3d, # boundary indicators 0 through 5 indicate left, right, front, back, bottom # and top boundaries (see also the documentation of the deal.II class # ``GeometryInfo''). You can also use symbolic names ``left'', ``right'', # etc., to refer to these boundaries in input files. It is also possible to # add initial topography to the box model. Note however that this is done # after the last initial adaptive refinement cycle. Also, initial topography # is supposed to be small, as it is not taken into account when depth or a # representative point is computed. # # `box with lithosphere boundary indicators': A box geometry parallel to the # coordinate directions. The extent of the box in each coordinate direction # is set in the parameter file. This geometry model labels its sides with # 2*dim+2*(dim-1) boundary indicators: in 2d, boundary indicators 0 through # 3 denote the left, right, bottom and top boundaries, while indicators4 and # 5 denote the upper part of the left and right vertical boundary, # respectively. In 3d, boundary indicators 0 through 5 indicate left, right, # front, back, bottom and top boundaries (see also the documentation of the # deal.II class ``GeometryInfo''), while indicators 6, 7, 8 and 9 denote the # left, right, front and back upper parts of the vertical boundaries, # respectively. You can also use symbolic names ``left'', ``right'', ``left # lithosphere'', etc., to refer to these boundaries in input files. # # Note that for a given ``Global refinement level'' and no user-specified # ``Repetitions'', the lithosphere part of the mesh will be more refined. # # The additional boundary indicators for the lithosphere allow for selecting # boundary conditions for the lithosphere different from those for the # underlying mantle. An example application of this geometry is to prescribe # a velocity on the lithospheric plates, but use open boundary conditions # underneath. # # `chunk': A geometry which can be described as a chunk of a spherical # shell, bounded by lines of longitude, latitude and radius. The minimum and # maximum longitude, latitude (if in 3d) and depth of the chunk is set in # the parameter file. The chunk geometry labels its 2*dim sides as follows: # ``west'' and ``east'': minimum and maximum longitude, ``south'' and # ``north'': minimum and maximum latitude, ``inner'' and ``outer'': minimum # and maximum radii. # # The dimensions of the model are specified by parameters of the following # form: Chunk (minimum || maximum) (longitude || latitude): edges of # geographical quadrangle (in degrees)Chunk (inner || outer) radius: Radii # at bottom and top of chunk(Longitude || Latitude || Radius) repetitions: # number of cells in each coordinate direction. 
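#
# As an illustration only (hypothetical values): a 2d chunk spanning 90
# degrees of longitude between the core-mantle boundary and the surface
# could be set up as
#   subsection Geometry model
#     set Model name = chunk
#     subsection Chunk
#       set Chunk minimum longitude = 0
#       set Chunk maximum longitude = 90
#       set Chunk inner radius      = 3481000
#       set Chunk outer radius      = 6371000
#     end
#   end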
# # When used in 2d, this geometry does not imply the use of a spherical # coordinate system. Indeed, in 2d the geometry is simply a sector of an # annulus in a Cartesian coordinate system and consequently would correspond # to a sector of a cross section of the fluid filled space between two # infinite cylinders where one has made the assumption that the velocity in # direction of the cylinder axes is zero. This is consistent with the # definition of what we consider the two-dimension case given in # Section~\ref{sec:meaning-of-2d}. It is also possible to add initial # topography to the chunk geometry, based on an ascii data file. # # `ellipsoidal chunk': A 3D chunk geometry that accounts for Earth's # ellipticity (default assuming the WGS84 ellipsoid definition) which can be # defined in non-coordinate directions. In the description of the # ellipsoidal chunk, two of the ellipsoidal axes have the same length so # that there is only a semi-major axis and a semi-minor axis. The user has # two options for creating an ellipsoidal chunk geometry: 1) by defining two # opposing points (SW and NE or NW and SE) a coordinate parallel ellipsoidal # chunk geometry will be created. 2) by defining three points a # non-coordinate parallel ellipsoidal chunk will be created. The points are # defined in the input file by longitude:latitude. It is also possible to # define additional subdivisions of the mesh in each direction. The boundary # of the domain is formed by linear interpolation in longitude-latitude # space between adjacent points (i.e. [lon, lat](f) = [lon1*f + lon2*(1-f), # lat1*f + lat2*(1-f)], where f is a value between 0 and 1). Faces of the # model are defined as 0, west; 1,east; 2, south; 3, north; 4, inner; 5, # outer. # # This geometry model supports initial topography for deforming the initial # mesh. # # `sphere': A geometry model for a sphere with a user specified radius. This # geometry has only a single boundary, so the only valid boundary indicator # to specify in input files is ``0''. It can also be referenced by the # symbolic name ``surface'' in input files. # # Despite the name, this geometry does not imply the use of a spherical # coordinate system when used in 2d. Indeed, in 2d the geometry is simply a # circle in a Cartesian coordinate system and consequently would correspond # to a cross section of the fluid filled interior of an infinite cylinder # where one has made the assumption that the velocity in direction of the # cylinder axes is zero. This is consistent with the definition of what we # consider the two-dimension case given in Section~\ref{sec:meaning-of-2d}. # # `spherical shell': A geometry representing a spherical shell or a piece of # it. Inner and outer radii are read from the parameter file in subsection # 'Spherical shell'. # # The spherical shell may be generated as per the original code (with # respect to the inner and outer radius, and an initial number of cells # along circumference) or following a custom mesh scheme: list of radial # values or number of slices. A surface mesh is first generated and refined # as desired, before it is extruded radially. A list of radial values # subdivides the spherical shell at specified radii. The number of slices # subdivides the spherical shell into N slices of equal thickness. The # custom spherical shell only works with an opening angle of 360 degrees. # # Despite the name, this geometry does not imply the use of a spherical # coordinate system when used in 2d. 
Indeed, in 2d the geometry is simply an # annulus in a Cartesian coordinate system and consequently would correspond # to a cross section of the fluid filled space between two infinite # cylinders where one has made the assumption that the velocity in direction # of the cylinder axes is zero. This is consistent with the definition of # what we consider the two-dimension case given in # Section~\ref{sec:meaning-of-2d}. # # The model assigns boundary indicators as follows: In 2d, inner and outer # boundaries get boundary indicators zero and one, and if the opening angle # set in the input file is less than 360, then left and right boundaries are # assigned indicators two and three. These boundaries can also be referenced # using the symbolic names `inner', `outer' and (if applicable) `left', # `right'. # # In 3d, inner and outer indicators are treated as in 2d. If the opening # angle is chosen as 90 degrees, i.e., the domain is the intersection of a # spherical shell and the first octant, then indicator 2 is at the face # $x=0$, 3 at $y=0$, and 4 at $z=0$. These last three boundaries can then # also be referred to as `east', `west' and `south' symbolically in input # files. set Model name = spherical shell # default: unspecified subsection Box # X coordinate of box origin. Units: $\si{m}$. set Box origin X coordinate = 0. # Y coordinate of box origin. Units: $\si{m}$. set Box origin Y coordinate = 0. # Z coordinate of box origin. This value is ignored if the simulation is # in 2d. Units: $\si{m}$. set Box origin Z coordinate = 0. # Extent of the box in x-direction. Units: $\si{m}$. set X extent = 1. # Whether the box should be periodic in X direction set X periodic = false # Number of cells in X direction. set X repetitions = 1 # Extent of the box in y-direction. Units: $\si{m}$. set Y extent = 1. # Whether the box should be periodic in Y direction set Y periodic = false # Number of cells in Y direction. set Y repetitions = 1 # Extent of the box in z-direction. This value is ignored if the # simulation is in 2d. Units: $\si{m}$. set Z extent = 1. # Whether the box should be periodic in Z direction set Z periodic = false # Number of cells in Z direction. set Z repetitions = 1 end subsection Box with lithosphere boundary indicators # X coordinate of box origin. Units: $\si{m}$. set Box origin X coordinate = 0. # Y coordinate of box origin. Units: $\si{m}$. set Box origin Y coordinate = 0. # Z coordinate of box origin. This value is ignored if the simulation is # in 2d. Units: $\si{m}$. set Box origin Z coordinate = 0. # The thickness of the lithosphere used to create additional boundary # indicators to set specific boundary conditions for the lithosphere. set Lithospheric thickness = 0.2 # Extent of the box in x-direction. Units: $\si{m}$. set X extent = 1. # Whether the box should be periodic in X direction. set X periodic = false # Whether the box should be periodic in X direction in the lithosphere. set X periodic lithosphere = false # Number of cells in X direction of the lower box. The same number of # repetitions will be used in the upper box. set X repetitions = 1 # Extent of the box in y-direction. Units: $\si{m}$. set Y extent = 1. # Whether the box should be periodic in Y direction. set Y periodic = false # Whether the box should be periodic in Y direction in the lithosphere. # This value is ignored if the simulation is in 2d. set Y periodic lithosphere = false # Number of cells in Y direction of the lower box. 
If the simulation is in # 3d, the same number of repetitions will be used in the upper box. set Y repetitions = 1 # Number of cells in Y direction in the lithosphere. This value is ignored # if the simulation is in 3d. set Y repetitions lithosphere = 1 # Extent of the box in z-direction. This value is ignored if the # simulation is in 2d. Units: $\si{m}$. set Z extent = 1. # Whether the box should be periodic in Z direction. This value is ignored # if the simulation is in 2d. set Z periodic = false # Number of cells in Z direction of the lower box. This value is ignored # if the simulation is in 2d. set Z repetitions = 1 # Number of cells in Z direction in the lithosphere. This value is ignored # if the simulation is in 2d. set Z repetitions lithosphere = 1 end subsection Chunk # Radius at the bottom surface of the chunk. Units: $\si{m}$. set Chunk inner radius = 0. # Maximum latitude of the chunk. This value is ignored if the simulation # is in 2d. Units: degrees. set Chunk maximum latitude = 1. # Maximum longitude of the chunk. Units: degrees. set Chunk maximum longitude = 1. # Minimum latitude of the chunk. This value is ignored if the simulation # is in 2d. Units: degrees. set Chunk minimum latitude = 0. # Minimum longitude of the chunk. Units: degrees. set Chunk minimum longitude = 0. # Radius at the top surface of the chunk. Units: $\si{m}$. set Chunk outer radius = 1. # Number of cells in latitude. This value is ignored if the simulation is # in 2d set Latitude repetitions = 1 # Number of cells in longitude. set Longitude repetitions = 1 # Number of cells in radius. set Radius repetitions = 1 end subsection Ellipsoidal chunk # Bottom depth of model region. set Depth = 500000.0 # The number of subdivisions of the coarse (initial) mesh in depth. set Depth subdivisions = 1 # The number of subdivisions of the coarse (initial) mesh in the East-West # direction. set East-West subdivisions = 1 # Eccentricity of the ellipsoid. Zero is a perfect sphere, default # (8.1819190842622e-2) is WGS84. set Eccentricity = 8.1819190842622e-2 # Longitude:latitude in degrees of the North-East corner point of model # region.The North-East direction is positive. If one of the three corners # is not provided the missing corner value will be calculated so all faces # are parallel. set NE corner = # Longitude:latitude in degrees of the North-West corner point of model # region. The North-East direction is positive. If one of the three # corners is not provided the missing corner value will be calculated so # all faces are parallel. set NW corner = # The number of subdivisions of the coarse (initial) mesh in the # North-South direction. set North-South subdivisions = 1 # Longitude:latitude in degrees of the South-East corner point of model # region. The North-East direction is positive. If one of the three # corners is not provided the missing corner value will be calculated so # all faces are parallel. set SE corner = # Longitude:latitude in degrees of the South-West corner point of model # region. The North-East direction is positive. If one of the three # corners is not provided the missing corner value will be calculated so # all faces are parallel. set SW corner = # The semi-major axis (a) of an ellipsoid. This is the radius for a sphere # (eccentricity=0). Default WGS84 semi-major axis. 
set Semi-major axis = 6378137.0 end subsection Initial topography model # Select one of the following models: # # `ascii data': Implementation of a model in which the surface topography # is derived from a file containing data in ascii format. The following # geometry models are currently supported: box, chunk. Note the required # format of the input data: The first lines may contain any number of # comments if they begin with `#', but one of these lines needs to contain # the number of grid points in each dimension as for example `# POINTS: 3 # 3'. The order of the data columns has to be `x', `Topography [m]' in a # 2d model and `x', `y', `Topography [m]' in a 3d model, which means that # there has to be a single column containing the topography. Note that the # data in the input file needs to be sorted in a specific order: the first # coordinate needs to ascend first, followed by the second in order to # assign the correct data to the prescribed coordinates. If you use a # spherical model, then the assumed grid changes. `x' will be replaced by # the azimuth angle in radians and `y' by the polar angle in radians # measured positive from the north pole. The grid will be assumed to be a # longitude-colatitude grid. Note that the order of spherical coordinates # is `phi', `theta' and not `theta', `phi', since this allows for # dimension independent expressions. # # `function': Implementation of a model in which the initial topography is # described by a function in cartesian or spherical coordinates. # # `prm polygon': An initial topography model that defines the initial # topography as constant inside each of a set of polygonal parts of the # surface. The polygons, and their associated surface elevation, are # defined in the `Geometry model/Initial topography/Prm polygon' section. # # `zero topography': Implementation of a model in which the initial # topography is zero. set Model name = zero topography subsection Ascii data model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which # the ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/geometry-model/initial-topography-model/ascii-data/test/ # The file name of the model data. Provide file in format: (Velocity # file name).\%s\%d where \%s is a string specifying the boundary of the # model according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = box_2d_%s.0.txt # Scalar factor, which is applied to the model data. You might want to # use this to scale the input to a reference model. Another way to use # this factor is to convert units of the input files. For instance, if # you provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end subsection Function # A selection that determines the assumed coordinate system for the # function variables. Allowed values are `cartesian' and `spherical'. # `spherical' coordinates are interpreted as r,phi or r,phi,theta in # 2D/3D respectively with theta being the polar angle. 
set Coordinate system = cartesian # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric # value everywhere the constant appears. These values can be defined # using this parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or # multiplication, as well as all of the common functions such as `sin' # or `cos'. In addition, it may contain expressions like `if(x>0, 1, # -1)' where the expression evaluates to the second argument if the # first argument is true, and to the third argument otherwise. For a # full overview of possible expressions accepted see the documentation # of the muparser library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The maximum value the topography given by the function can take. set Maximum topography value = 2000. # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' # (in 3d) for spatial coordinates and `t' for time. You can then use # these variable names in your function expression and they will be # replaced by the values of these variables at which the function is # currently evaluated. However, you can also choose a different set of # names for the independent variables at which to evaluate your function # expression. For example, if you work in spherical coordinates, you may # wish to set this input parameter to `r,phi,theta,t' and then use these # variable names in your function expression. set Variable names = x,y,t end subsection Prm polygon # Set the topography height and the polygon which should be set to that # height. The format is: "The topography height \textgreater The point # list describing a polygon \& The next topography height \textgreater # the next point list describing a polygon." The format for the point # list describing the polygon is "x1,y1;x2,y2". For example for two # triangular areas of 100 and -100 meters high set: '100 \textgreater # 0,0;5,5;0,10 \& -100 \textgreater 10,10;10,15;20,15'. Units of the # height are always in meters. The units of the coordinates are # dependent on the geometry model. In the box model they are in meters, # in the chunks they are in degrees, etc. Please refer to the manual of # the individual geometry model to see how the topography is # implemented. set Topography parameters = end end subsection Sphere # Radius of the sphere. Units: $\si{m}$. set Radius = 6371000. end subsection Spherical shell # The number of cells in circumferential direction that are created in the # coarse mesh in 2d. If zero, this number is chosen automatically in a way # that produces meshes in which cells have a reasonable aspect ratio for # models in which the depth of the mantle is roughly that of the Earth.
# For planets with much shallower mantles and larger cores, you may want # to chose a larger number to avoid cells that are elongated in tangential # and compressed in radial direction. # # In 3d, the number of cells is computed differently and does not have an # easy interpretation. Valid values for this parameter in 3d are 0 (let # this class choose), 6, 12 and 96. Other possible values may be discussed # in the documentation of the deal.II function GridGenerator::hyper_shell. # The parameter is best left at its default in 3d. # # In either case, this parameter is ignored unless the opening angle of # the domain is 360 degrees. This parameter is also ignored when using a # custom mesh subdivision scheme. set Cells along circumference = 12 # default: 0 # Choose how the spherical shell mesh is generated. By default, a coarse # mesh is generated with respect to the inner and outer radius, and an # initial number of cells along circumference. In the other cases, a # surface mesh is first generated and refined as desired, before it is # extruded radially following the specified subdivision scheme. set Custom mesh subdivision = none # Initial lateral refinement for the custom mesh subdivision schemes.The # number of refinement steps performed on the initial coarse surface mesh, # before the surface is extruded radially. This parameter allows the user # more control over the ratio between radial and lateral refinement of the # mesh. set Initial lateral refinement = 0 # Inner radius of the spherical shell. Units: $\si{m}$. # # \note{The default value of 3,481,000 m equals the radius of a sphere # with equal volume as Earth (i.e., 6371 km) minus the average depth of # the core-mantle boundary (i.e., 2890 km).} set Inner radius = 3450000.0 # default: 3481000. # List of radial values for the custom mesh scheme. Units: $\si{m}$. A # list of radial values subdivides the spherical shell at specified radii. # The list must be strictly ascending, and the first value must be greater # than the inner radius while the last must be less than the outer # radius. set List of radial values = # Number of slices for the custom mesh subdivision scheme. The number of # slices subdivides the spherical shell into N slices of equal thickness. # Must be greater than 0. set Number of slices = 1 # Opening angle in degrees of the section of the shell that we want to # build. The only opening angles that are allowed for this geometry are # 90, 180, and 360 in 2d; and 90 and 360 in 3d. Units: degrees. set Opening angle = 360 # default: 360. # Outer radius of the spherical shell. Units: $\si{m}$. # # \note{The default value of 6,336,000 m equals the radius of a sphere # with equal volume as Earth (i.e., 6371 km) minus the average depth of # the mantle-crust interface (i.e., 35 km).} set Outer radius = 6371000.0 # default: 6336000. end end subsection Gravity model # Select one of the following models: # # `ascii data': Gravity is read from a file that describes the reference # state. The default profile follows the preliminary reference Earth model # (PREM, Dziewonski and Anderson, 1981). Note the required format of the # input data: The first lines may contain any number of comments if they # begin with `#', but one of these lines needs to contain the number of # points in the reference state as for example `# POINTS: 3'. Following the # comment lines there has to be a single line containing the names of all # data columns, separated by arbitrarily many spaces. Column names are not # allowed to contain spaces. 
The file can contain unnecessary columns, but # for this plugin it needs to at least provide a column named `gravity'. # Note that the data lines in the file need to be sorted in order of # increasing depth from 0 to the maximal depth in the model domain. Points # in the model that are outside of the provided depth range will be assigned # the maximum or minimum depth values, respectively. Points do not need to # be equidistant, but the computation of properties is optimized in speed if # they are. # # `function': Gravity is given in terms of an explicit formula that is # elaborated in the parameters in section ``Gravity model|Function''. The # format of these functions follows the syntax understood by the muparser # library, see Section~\ref{sec:muparser-format}. # # `radial constant': A gravity model in which the gravity has a constant # magnitude and the direction is radial (pointing inward if the value is # positive). The magnitude is read from the parameter file in subsection # 'Radial constant'. # # `radial earth-like': This plugin has been removed due to its misleading # name. The included profile was hard-coded and was less earth-like than the # `ascii data' plugin, which uses the profile of the Preliminary Reference # Earth Model (PREM). Use `ascii data' instead of `radial earth-like'. # # `radial linear': A gravity model which is radial (pointing inward if the # gravity is positive) and the magnitude changes linearly with depth. The # magnitude of gravity at the surface and bottom is read from the input file # in a section ``Gravity model/Radial linear''. # # `vertical': A gravity model in which the gravity direction is vertical # (pointing downward for positive values) and at a constant magnitude by # default equal to one. set Model name = radial constant # default: unspecified subsection Ascii data model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/gravity-model/ # The file name of the model data. Provide file in format: (Velocity file # name).\%s\%d where \%s is a string specifying the boundary of the model # according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = prem.txt # Scalar factor, which is applied to the model data. You might want to use # this to scale the input to a reference model. Another way to use this # factor is to convert units of the input files. For instance, if you # provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end subsection Function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. 
(That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0; 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection Radial constant # Magnitude of the gravity vector in $m/s^2$. For positive values the # direction is radially inward towards the center of the earth. set Magnitude = 9.81 end subsection Radial linear # Magnitude of the radial gravity vector at the bottom of the domain. # `Bottom' means the maximum depth in the chosen geometry, and for example # represents the core-mantle boundary in the case of the `spherical shell' # geometry model, and the center in the case of the `sphere' geometry # model. Units: $m/s^2$ set Magnitude at bottom = 10.7 # Magnitude of the radial gravity vector at the surface of the domain. # Units: $m/s^2$ set Magnitude at surface = 9.8 end subsection Vertical # Value of the gravity vector in $m/s^2$ directed along negative y (2D) or # z (3D) axis (if the magnitude is positive). set Magnitude = 1. end end subsection Heating model # A comma separated list of heating models that will be used to calculate # the heating terms in the energy equation. The results of each of these # criteria, i.e., the heating source terms and the latent heat terms for the # left hand side will be added. # # The following heating models are available: # # `adiabatic heating': Implementation of a standard and a simplified model # of adiabatic heating. # # `adiabatic heating of melt': Implementation of a standard and a simplified # model of adiabatic heating of melt. The full model implements the heating # term # $\alpha T (-\phi \mathbf u_s \cdot \nabla p) + \alpha T (\phi \mathbf u_f # \cdot \nabla p)$.
# For full adiabatic heating, this has to be used in combination with the # heating model `adiabatic heating' to also include adiabatic heating for # the solid part, and the full heating term is then $\alpha T ((1-\phi) # \mathbf u_s \cdot \nabla p) + \alpha T (\phi \mathbf u_f \cdot \nabla p)$. # # `compositional heating': Implementation of a model in which magnitude of # internal heat production is determined from fixed values assigned to each # compositional field. These values are interpreted as having units $W/m^3$. # # `constant heating': Implementation of a model in which the heating rate is # constant. # # `function': Implementation of a model in which the heating rate is given # in terms of an explicit formula that is elaborated in the parameters in # section ``Heating model|Function''. The format of these functions follows # the syntax understood by the muparser library, see # Section~\ref{sec:muparser-format}. # # The formula is interpreted as having units W/kg. # # Since the symbol $t$ indicating time may appear in the formulas for the # heating rate, it is interpreted as having units seconds unless the global # parameter ``Use years in output instead of seconds'' is set. # # `latent heat': Implementation of a standard model for latent heat. # # `latent heat melt': Implementation of a standard model for latent heat of # melting. This assumes that there is a compositional field called porosity, # and it uses the reaction term of this field (the fraction of material that # melted in the current time step) multiplied by a constant entropy change # for melting all of the material as source term of the heating model. # If there is no field called porosity, the heating terms are 0. # # `radioactive decay': Implementation of a model in which the internal # heating rate is radioactive decaying in the following rule: # \[(\text{initial concentration})\cdot 0.5^{\text{time}/(\text{half # life})}\] # The crust and mantle can have different concentrations, and the crust can # be defined either by depth or by a certain compositional field. # The formula is interpreted as having units W/kg. # # `shear heating': Implementation of a standard model for shear heating. # Adds the term: $ 2 \eta \left( \varepsilon - \frac{1}{3} \text{tr} # \varepsilon \mathbf 1 \right) : \left( \varepsilon - \frac{1}{3} \text{tr} # \varepsilon \mathbf 1 \right)$ to the right-hand side of the temperature # equation. # # `shear heating with melt': Implementation of a standard model for shear # heating of migrating melt, including bulk (compression) heating $\xi # \left( \nabla \cdot \mathbf u_s \right)^2 $ and heating due to melt # segregation $\frac{\eta_f \phi^2}{k} \left( \mathbf u_f - \mathbf u_s # \right)^2 $. For full shear heating, this has to be used in combination # with the heating model shear heating to also include shear heating for the # solid part. set List of model names = constant heating # default: subsection Adiabatic heating # A flag indicating whether the adiabatic heating should be simplified # from $\alpha T (\mathbf u \cdot \nabla p)$ to $ \alpha \rho T (\mathbf u # \cdot \mathbf g) $. set Use simplified adiabatic heating = false end subsection Adiabatic heating of melt # A flag indicating whether the adiabatic heating should be simplified # from $\alpha T (\mathbf u \cdot \nabla p)$ to $ \alpha \rho T (\mathbf u # \cdot \mathbf g) $. 
set Use simplified adiabatic heating = false end subsection Compositional heating # List of heat production per unit volume values for background and # compositional fields, for a total of N+1 values, where the first value # corresponds to the background material, and N is the number of # compositional fields. Units: $W/m^3$. set Compositional heating values = 0. # A list of integers with as many entries as compositional fields plus # one. The first entry corresponds to the background material, each # following entry corresponds to a particular compositional field. If the # entry for a field is '1' this field is considered during the computation # of volume fractions; if it is '0' the field is ignored. This is useful # if some compositional fields are used to track properties like finite # strain that should not contribute to heat production. The first entry # determines whether the background field contributes to heat production # or not (essentially similar to setting its 'Compositional heating # values' to zero, but included for consistency in the length of the input # lists). set Use compositional field for heat production averaging = 1 end subsection Constant heating # The specific rate of heating due to radioactive decay (or other bulk # sources you may want to describe). This parameter corresponds to the # variable $H$ in the temperature equation stated in the manual, and the # heating term is $\rho H$. Units: W/kg. set Radiogenic heating rate = 406.442 # default: 0. end subsection Function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression.
For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection Latent heat melt # The entropy change for the phase transition from solid to melt. Units: # $J/(kg K)$. set Melting entropy change = -300. end subsection Radioactive decay # Which composition field should be treated as crust set Crust composition number = 0 # Whether crust defined by composition or depth set Crust defined by composition = false # Depth of the crust when crust if defined by depth. Units: $\si{m}$ set Crust depth = 0. # Half decay times. Units: (Seconds), or (Years) if set `use years instead # of seconds'. set Half decay times = # Heating rates of different elements (W/kg) set Heating rates = # Initial concentrations of different elements (ppm) set Initial concentrations crust = # Initial concentrations of different elements (ppm) set Initial concentrations mantle = # Number of radioactive elements set Number of elements = 0 end end subsection Initial composition model # A comma-separated list of initial composition models that together # describe the initial composition field. These plugins are loaded in the # order given, and modify the existing composition field via the operators # listed in 'List of model operators'. # # The following composition models are available: # # `adiabatic density': Specify the initial composition as the adiabatic # reference density at each position. Note that only the field with the name # 'density\_field' will be filled for all other fields this plugin returns # 0.0. # # `ascii data': Implementation of a model in which the initial composition # is derived from files containing data in ascii format. Note the required # format of the input data: The first lines may contain any number of # comments if they begin with `#', but one of these lines needs to contain # the number of grid points in each dimension as for example `# POINTS: 3 # 3'. The order of the data columns has to be `x', `y', `composition1', # `composition2', etc. in a 2d model and `x', `y', `z', `composition1', # `composition2', etc. in a 3d model, according to the number of # compositional fields, which means that there has to be a single column for # every composition in the model.Note that the data in the input files need # to be sorted in a specific order: the first coordinate needs to ascend # first, followed by the second and the third at last in order to assign the # correct data to the prescribed coordinates. If you use a spherical model, # then the assumed grid changes. `x' will be replaced by the radial distance # of the point to the bottom of the model, `y' by the azimuth angle and `z' # by the polar angle measured positive from the north pole. The grid will be # assumed to be a latitude-longitude grid. Note that the order of spherical # coordinates is `r', `phi', `theta' and not `r', `theta', `phi', since this # allows for dimension independent expressions. # # `ascii data layered': Implementation of a model in which the initial # composition is derived from files containing data in ascii format. Each # file defines a surface on which compositional fields are defined. Between # the surfaces, the fields can be chosen to be constant (with a value # defined by the nearest shallower surface), or linearly interpolated # between surfaces. 
Note the required format of the input ascii data file: # The first lines may contain any number of comments if they begin with `#', # but one of these lines needs to contain the number of grid points in each # dimension as for example `# POINTS: 3 3'. The order of the data columns # has to be `x', `y', `composition1', `composition2' etc. in a 2d model and # `x', `y', `z', `composition1', `composition2' etc. in a 3d model; i.e. the # columns before the compositional fields always contain the position of # the surface along the vertical direction. The first column needs to ascend # first, followed by the second in order to assign the correct data to the # prescribed coordinates. If you use a spherical model, then the assumed # grid changes. `x' will be replaced by the azimuth angle and `y' (if 3D) by # the polar angle measured positive from the north pole. The last column # will be the distance of the point from the origin (i.e. radial position). # The grid in this case will be a latitude-longitude grid. Note that the # order of spherical coordinates in 3D is `phi', `theta', `r', `T' and not # `theta', `phi', `r', `T' as this is more consistent with other ASPECT # plugins. Outside of the region defined by the grid, the plugin will use # the value at the edge of the region. # # `function': Specify the composition in terms of an explicit formula. The # format of these functions follows the syntax understood by the muparser # library, see Section~\ref{sec:muparser-format}. # # `porosity': A class that implements initial conditions for the porosity # field by computing the equilibrium melt fraction for the given initial # condition and reference pressure profile. Note that this plugin only works # if there is a compositional field called `porosity', and the used material # model implements the 'MeltFractionModel' interface. For all compositional # fields except porosity this plugin returns 0.0, and they are therefore not # changed as long as the default `add' operator is selected for this plugin. # # `world builder': Specify the initial composition through the World # Builder. More information on the World Builder can be found at # \url{https://geodynamicworldbuilder.github.io}. Make sure to specify the # location of the World Builder file in the parameter 'World builder file'. set List of model names = # A comma-separated list of operators that will be used to append the listed # composition models onto the previous models. If only one operator is # given, the same operator is applied to all models. set List of model operators = add # Select one of the following models: # # `adiabatic density': Specify the initial composition as the adiabatic # reference density at each position. Note that only the field with the name # 'density\_field' will be filled; for all other fields this plugin returns # 0.0. # # `ascii data': Implementation of a model in which the initial composition # is derived from files containing data in ascii format. Note the required # format of the input data: The first lines may contain any number of # comments if they begin with `#', but one of these lines needs to contain # the number of grid points in each dimension as for example `# POINTS: 3 # 3'. The order of the data columns has to be `x', `y', `composition1', # `composition2', etc. in a 2d model and `x', `y', `z', `composition1', # `composition2', etc.
in a 3d model, according to the number of # compositional fields, which means that there has to be a single column for # every composition in the model.Note that the data in the input files need # to be sorted in a specific order: the first coordinate needs to ascend # first, followed by the second and the third at last in order to assign the # correct data to the prescribed coordinates. If you use a spherical model, # then the assumed grid changes. `x' will be replaced by the radial distance # of the point to the bottom of the model, `y' by the azimuth angle and `z' # by the polar angle measured positive from the north pole. The grid will be # assumed to be a latitude-longitude grid. Note that the order of spherical # coordinates is `r', `phi', `theta' and not `r', `theta', `phi', since this # allows for dimension independent expressions. # # `ascii data layered': Implementation of a model in which the initial # composition is derived from files containing data in ascii format. Each # file defines a surface on which compositional fields are defined. Between # the surfaces, the fields can be chosen to be constant (with a value # defined by the nearest shallower surface), or linearly interpolated # between surfaces. Note the required format of the input ascii data file: # The first lines may contain any number of comments if they begin with `#', # but one of these lines needs to contain the number of grid points in each # dimension as for example `# POINTS: 3 3'. The order of the data columns # has to be `x', `y', `composition1', `composition2' etc. in a 2d model and # `x', `y', `z', `composition1', `composition2' etc. in a 3d model; i.e. the # columns before the lcompositional field always contains the position of # the surface along the vertical direction. The first column needs to ascend # first, followed by the second in order to assign the correct data to the # prescribed coordinates. If you use a spherical model, then the assumed # grid changes. `x' will be replaced by the azimuth angle and `y' (if 3D) by # the polar angle measured positive from the north pole. The last column # will be the distance of the point from the origin (i.e. radial position). # The grid in this case will be a latitude-longitude grid. Note that the # order of spherical coordinates in 3D is `phi', `theta', `r', `T'and not # `theta', `phi', `r', `T' as this is more consistent with other ASPECT # plugins. Outside of the region defined by the grid, the plugin will use # the value at the edge of the region. # # `function': Specify the composition in terms of an explicit formula. The # format of these functions follows the syntax understood by the muparser # library, see Section~\ref{sec:muparser-format}. # # `porosity': A class that implements initial conditions for the porosity # field by computing the equilibrium melt fraction for the given initial # condition and reference pressure profile. Note that this plugin only works # if there is a compositional field called `porosity', and the used material # model implements the 'MeltFractionModel' interface. For all compositional # fields except porosity this plugin returns 0.0, and they are therefore not # changed as long as the default `add' operator is selected for this plugin. # # `world builder': Specify the initial composition through the World # Builder. More information on the World Builder can be found at # \url{https://geodynamicworldbuilder.github.io}. Make sure to specify the # location of the World Builder file in the parameter 'World builder file'. 
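# As a purely illustrative sketch (the expression and values below are hypothetical and not defaults of this listing), a model with a single compositional field could prescribe its initial value with the `function' plugin of `List of model names', using the muparser syntax described above:
#   subsection Initial composition model
#     set List of model names = function
#     subsection Function
#       set Variable names = x,y,t
#       set Function expression = if(y < 0.5, 1, 0)
#     end
#   end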
# # \textbf{Warning}: This parameter provides an old and deprecated way of # specifying initial composition models and shouldn't be used. Please use # 'List of model names' instead. set Model name = unspecified # A comma separated list denoting the method to be used to initialize a # composition field specified to be advected using the volume of fluid # method. # # The format of valid entries for this parameter is that of a map given as # ``key1:value1, key2:value2`` where each key must be the name of a # compositional field using the volume of fluid advection method, and the # value is one of ``composition`` or ``level set``. ``composition`` is the # default. # # When ``composition`` is specified, the initial model is treated as a # standard composition field with bounds between 0 and 1 assumed. The # initial fluid fractions are then based on an iterated midpoint quadrature. # Resultant volume fractions outside of the bounds will be coerced to the # nearest valid value (i.e. 0 or 1). If ``level set`` is specified, the initial # data will be assumed to be in the form of a signed distance level set # function (i.e. a function which is positive when in the fluid, negative # outside, and zero on the interface, and the magnitude is always the # distance to the interface so the gradient is one everywhere). set Volume of fluid intialization type = subsection Ascii data model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/initial-composition/ascii-data/test/ # The file name of the model data. Provide file in format: (Velocity file # name).\%s\%d where \%s is a string specifying the boundary of the model # according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = initial_composition_top_mantle_box_3d.txt # The file names of the model data (comma separated). set Data file names = initial_composition_top_mantle_box_3d.txt # Method to interpolate between layer boundaries. Select from piecewise # constant or linear. Piecewise constant takes the value from the nearest # layer boundary above the data point. The linear option interpolates # linearly between layer boundaries. Above and below the domain given by # the layer boundaries, the values are given by the top and bottom layer # boundary. set Interpolation scheme = linear # Scalar factor, which is applied to the model data. You might want to use # this to scale the input to a reference model. Another way to use this # factor is to convert units of the input files. For instance, if you # provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end subsection Function # A selection that determines the assumed coordinate system for the # function variables. Allowed values are `cartesian', `spherical', and # `depth'. `spherical' coordinates are interpreted as r,phi or r,phi,theta # in 2D/3D respectively with theta being the polar angle. `depth' will # create a function, in which only the first parameter is non-zero, which # is interpreted to be the depth of the point.
set Coordinate system = cartesian # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end end subsection Initial temperature model # A comma-separated list of initial temperature models that will be used to # initialize the temperature. These plugins are loaded in the order given, # and modify the existing temperature field via the operators listed in # 'List of model operators'. # # The following initial temperature models are available: # # `S40RTS perturbation': An initial temperature field in which the # temperature is perturbed following the S20RTS or S40RTS shear wave # velocity model by Ritsema and others, which can be downloaded here # \url{http://www.earth.lsa.umich.edu/~jritsema/research.html}. Information # on the vs model can be found in Ritsema, J., Deuss, A., van Heijst, H.J. # \& Woodhouse, J.H., 2011. S40RTS: a degree-40 shear-velocity model for the # mantle from new Rayleigh wave dispersion, teleseismic traveltime and # normal-mode splitting function measurements, Geophys. J. Int. 184, # 1223-1236. The scaling between the shear wave perturbation and the density # perturbation can be constant and set by the user with the 'Vs to density # scaling' parameter or depth-dependent and read in from a file. To convert # density the user can specify the 'Thermal expansion coefficient in initial # temperature scaling' parameter. 
The scaling is as follows: $\delta ln \rho # (r,\theta,\phi) = \xi \cdot \delta ln v_s(r,\theta, \phi)$ and $\delta # T(r,\theta,\phi) = - \frac{1}{\alpha} \delta ln \rho(r,\theta,\phi)$. # $\xi$ is the `vs to density scaling' parameter and $\alpha$ is the # 'Thermal expansion coefficient in initial temperature scaling' parameter. # The temperature perturbation is added to an otherwise constant temperature # (incompressible model) or adiabatic reference profile (compressible # model). If a depth is specified in 'Remove temperature heterogeneity down # to specified depth', there is no temperature perturbation prescribed down # to that depth. # Note the required file format if the vs to density scaling is read in from # a file: The first lines may contain any number of comments if they begin # with '#', but one of these lines needs to contain the number of points in # the reference state as for example '# POINTS: 3'. Following the comment # lines there has to be a single line containing the names of all data # columns, separated by arbitrarily many spaces. Column names are not # allowed to contain spaces. The file can contain unnecessary columns, but # for this plugin it needs to at least provide the columns named `depth' and # `vs\_to\_density'. Note that the data lines in the file need to be sorted # in order of increasing depth from 0 to the maximal depth in the model # domain. Points in the model that are outside of the provided depth range # will be assigned the maximum or minimum depth values, respectively. Points # do not need to be equidistant, but the computation of properties is # optimized in speed if they are. # If the plugin is used in 2D it will use an equatorial slice of the seismic # tomography model. # # `SAVANI perturbation': An initial temperature field in which the # temperature is perturbed following the SAVANI shear wave velocity model by # Auer and others, which can be downloaded here # \url{http://n.ethz.ch/~auerl/savani.tar.bz2}. Information on the vs model # can be found in Auer, L., Boschi, L., Becker, T.W., Nissen-Meyer, T. \& # Giardini, D., 2014. Savani: A variable resolution whole-mantle model of # anisotropic shear velocity variations based on multiple data sets. Journal # of Geophysical Research: Solid Earth 119.4 (2014): 3006-3034. The scaling # between the shear wave perturbation and the density perturbation can be # constant and set by the user with the 'Vs to density scaling' parameter or # depth-dependent and read in from a file. To convert density the user can # specify the 'Thermal expansion coefficient in initial temperature scaling' # parameter. The scaling is as follows: $\delta ln \rho (r,\theta,\phi) = # \xi \cdot \delta ln v_s(r,\theta, \phi)$ and $\delta T(r,\theta,\phi) = - # \frac{1}{\alpha} \delta ln \rho(r,\theta,\phi)$. $\xi$ is the `vs to # density scaling' parameter and $\alpha$ is the 'Thermal expansion # coefficient in initial temperature scaling' parameter. The temperature # perturbation is added to an otherwise constant temperature (incompressible # model) or adiabatic reference profile (compressible model).If a depth is # specified in 'Remove temperature heterogeneity down to specified depth', # there is no temperature perturbation prescribed down to that depth. 
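# As a hedged numerical illustration (the numbers here are hypothetical and not defaults of this listing): with a scaling of $\xi = 0.25$, a thermal expansion coefficient of $\alpha = 3\times10^{-5}\,1/K$ and a shear wave anomaly of $\delta ln v_s = +0.01$ (i.e. +1\%), the relations above give $\delta ln \rho = 0.25 \cdot 0.01 = 0.0025$ and hence a temperature anomaly of $\delta T = -0.0025 / (3\times10^{-5}) \approx -83$ K, i.e. a fast (positive) velocity anomaly maps to a colder-than-reference temperature.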
# Note the required file format if the vs to density scaling is read in from # a file: The first lines may contain any number of comments if they begin # with '#', but one of these lines needs to contain the number of points in # the reference state as for example '# POINTS: 3'. Following the comment # lines there has to be a single line containing the names of all data # columns, separated by arbitrarily many spaces. Column names are not # allowed to contain spaces. The file can contain unnecessary columns, but # for this plugin it needs to at least provide the columns named `depth' and # `vs\_to\_density'. Note that the data lines in the file need to be sorted # in order of increasing depth from 0 to the maximal depth in the model # domain. Points in the model that are outside of the provided depth range # will be assigned the maximum or minimum depth values, respectively. Points # do not need to be equidistant, but the computation of properties is # optimized in speed if they are. # # `adiabatic': Temperature is prescribed as an adiabatic profile with upper # and lower thermal boundary layers, whose ages are given as input # parameters. # # `adiabatic boundary': An initial temperature condition that allows for # discretizing the model domain into two layers separated by a user-defined # isothermal boundary. The user includes an input ascii data file that is # formatted as 3 columns of `longitude(radians)', `colatitude(radians)', and # `isotherm depth(meters)', where `isotherm depth' represents the depth of # an initial temperature of 1673.15 K (by default). The first lines in the # data file may contain any number of comments if they begin with `#', but # one of these lines needs to contain the number of grid points in each # dimension as for example `# POINTS: 69 121'. Note that the coordinates # need to be sorted in a specific order: the `longitude' coordinate needs to # ascend first, followed by the `colatitude' coordinate in order to assign # the correct data (isotherm depth) to the prescribed coordinates. The # temperature is defined from the surface (273.15 K) to the isotherm depth # (1673.15 K) as a linear gradient. Below the isotherm depth the temperature # increases approximately adiabatically (0.0005 K per meter). This plugin # should work for all geometry models, but is currently only tested for # spherical models. # # `ascii data': Implementation of a model in which the initial temperature # is derived from files containing data in ascii format. Note the required # format of the input data: The first lines may contain any number of # comments if they begin with `#', but one of these lines needs to contain # the number of grid points in each dimension as for example `# POINTS: 3 # 3'. The order of the data columns has to be `x', `y', `Temperature [K]' in # a 2d model and `x', `y', `z', `Temperature [K]' in a 3d model, which # means that there has to be a single column containing the temperature. # Note that the data in the input files need to be sorted in a specific # order: the first coordinate needs to ascend first, followed by the second # and the third at last in order to assign the correct data to the # prescribed coordinates. If you use a spherical model, then the assumed # grid changes. `x' will be replaced by the radial distance of the point to # the bottom of the model, `y' by the azimuth angle and `z' by the polar # angle measured positive from the north pole. The grid will be assumed to # be a latitude-longitude grid. 
Note that the order of spherical coordinates # is `r', `phi', `theta' and not `r', `theta', `phi', since this allows for # dimension independent expressions. # # `ascii data layered': Implementation of a model in which the initial # temperature is derived from files containing data in ascii format. Each # file defines a surface on which temperature is defined. Between the # surfaces, the temperatures can be chosen to be constant (with a value # defined by the nearest shallower surface), or linearly interpolated # between surfaces. Note the required format of the input ascii data file: # The first lines may contain any number of comments if they begin with `#', # but one of these lines needs to contain the number of grid points in each # dimension as for example `# POINTS: 3 3'. The order of the data columns # has to be `x', `y', `Temperature [K]' in a 2d model and `x', `y', `z', # `Temperature [K]' in a 3d model; i.e. the last two columns always contain # the position of the isotherm along the vertical direction, and the # temperature at that point. The first column needs to ascend first, # followed by the second in order to assign the correct data to the # prescribed coordinates. If you use a spherical model, then the assumed # grid changes. `x' will be replaced by the azimuth angle and `y' (if 3D) by # the polar angle measured positive from the north pole. The last column # will be the distance of the point from the origin (i.e. radial position). # The grid in this case will be a latitude-longitude grid. Note that the # order of spherical coordinates in 3D is `phi', `theta', `r', `T'and not # `theta', `phi', `r', `T' as this is more consistent with other ASPECT # plugins. Outside of the region defined by the grid, the plugin will use # the value at the edge of the region. # # `ascii profile': Implementation of a model in which the initial # temperature is read from a file that provides these values as a function # of depth. Note the required format of the input data: The first lines may # contain any number of comments if they begin with `#', but one of these # lines needs to contain the number of points in the temperature profile, # for example `# POINTS: 10'. Following the comment lines, there has to be a # single line containing the names of all data columns, separated by # arbitrarily many spaces. Column names are not allowed to contain spaces. # The file can contain unnecessary columns, but for this plugin it needs to # at least provide columns named `depth' and`temperature'.Note that the data # lines in the file need to be sorted in order of increasing depth from 0 to # the maximal depth in the model domain. Points in the model that are # outside of the provided depth range will be assigned the maximum or # minimum depth values, respectively. Points do not need to be equidistant, # but the computation of properties is optimized in speed if they are. # # `conductive harmonic perturbation': An initial temperature field in which # the temperature is perturbed following a harmonic function (spherical # harmonic or sine depending on geometry and dimension) in lateral and # radial direction from an otherwise constant temperature (incompressible # model) or adiabatic reference profile (compressible model). # # `continental geotherm': This is a temperature initial condition that # computes a continental geotherm based on the solution of the steady-state # conductive equation $k\frac{d^2 T}{dy^2}+\rho H = 0$ as described in e.g. # Turcotte and Schubert, Ch. 4.6, or Chapman (1986). 
As boundary conditions, # we take the surface temperature and the temperature of the # Lithosphere-Asthenosphere Boundary (LAB). # The geotherm is computed for a homogeneous lithosphere composed of an # upper crust, lower crust and mantle layer. The crustal layers are assumed # to have a constant radioactive heating, and all layers are assumed to have # a constant thermal conductivity. Layer thicknesses, surface temperature # and LAB temperature should be specified by the user. For consistency, the # density, heat production and thermal conductivity of each layer are read # from the visco plastic material model and the compositional heating model. # # For any depths below the depth of the LAB, an unrealistically high # temperature is returned, such that this plugin can be combined with # another temperature plugin through the 'minimum' operator. # Note that the current implementation only works for a 3-layer lithosphere, # even though in principle the heat conduction equation can be solved for # any number of layers. The naming of the compositional fields that # represent the layers is also very specific, namely `upper\_crust', # `lower\_crust', and `lithospheric\_mantle'. # Make sure the top and bottom temperatures of the lithosphere agree with # temperatures set in for example the temperature boundary conditions. # # `function': Specify the initial temperature in terms of an explicit # formula. The format of these functions follows the syntax understood by # the muparser library, see Section~\ref{sec:muparser-format}. # # `harmonic perturbation': An initial temperature field in which the # temperature is perturbed following a harmonic function (spherical harmonic # or sine depending on geometry and dimension) in lateral and radial # direction from an otherwise constant temperature (incompressible model) or # adiabatic reference profile (compressible model). # # `inclusion shape perturbation': An initial temperature field in which # there is an inclusion in a constant-temperature box field. The size, # shape, gradient, position, and temperature of the inclusion are defined by # parameters. # # `lithosphere mask': Implementation of a model in which the initial # temperature is set to a specified lithosphere temperature above the # lithosphere-asthenosphere boundary (specified by an ascii file or maximum # lithosphere depth value). Below this the initial temperature is set to # NaN. Note the required format of the input data file: The first lines may # contain any number of comments if they begin with `#', but one of # these lines needs to contain the number of grid points in each dimension # as for example `# POINTS: 3 3'. For a spherical model, the order of the # data columns has to be 'phi', 'theta', 'depth (m)', where phi is the # azimuth angle and theta is the polar angle measured positive from the # north pole. This plug-in can be combined with another using the 'replace # if valid' operator. # # `mandelbox': Fractal-shaped temperature field. # # `patch on S40RTS': Implementation of a model in which the initial # temperature is derived from a file containing shear wave velocity # perturbations in ascii format (e.g. a high resolution upper mantle # tomography) combined with S40RTS. Note the required format of the input # ascii data: The first lines may contain any number of comments if # they begin with '#', but one of these lines needs to contain the number of # grid points in each dimension as for example '# POINTS: 3 3 3'.
The order # of the data columns has to be `x', `y', `z', 'Vs Perturbation' in a 3d # model, which means that there has to be a single column containing the # shear wave velocity perturbation. Note that the data in the input files need to be sorted in a # specific order: the first coordinate needs to ascend first, followed by # the second and the third at last in order to assign the correct data to # the prescribed coordinates. In the spherical model data will be handled as # Cartesian, however, `x' will be replaced by the radial distance of the # point to the bottom of the model, `y' by the azimuth angle and `z' by the # polar angle measured positive from the north pole. The grid will be # assumed to be a latitude-longitude grid. Note that the order of spherical # coordinates is `r', `phi', `theta' and not `r', `theta', `phi', since this # allows for dimension independent expressions. See S40RTS documentation for # details on input parameters in the S40RTS perturbation subsection. The # boundary between the two tomography models is smoothed using a depth # weighted combination of Vs values within the region of smoothing. # # `perturbed box': An initial temperature field in which the temperature is # perturbed slightly from an otherwise constant value equal to one. The # perturbation is chosen in such a way that the initial temperature is # equal to one along the entire boundary. # # `polar box': An initial temperature field in which the temperature is # perturbed slightly from an otherwise constant value equal to one. The # perturbation is such that there are two poles on opposing corners of the # box. # # `spherical gaussian perturbation': An initial temperature field in which # the temperature is perturbed by a single Gaussian added to an otherwise # spherically symmetric state. Additional parameters are read from the # parameter file in subsection 'Spherical gaussian perturbation'. # # `spherical hexagonal perturbation': An initial temperature field in which # the temperature is perturbed following an $N$-fold pattern in a specified # direction from an otherwise spherically symmetric state. The class's name # comes from previous versions when the only option was $N=6$. # # `world builder': Specify the initial temperature through the World # Builder. More information on the World Builder can be found at # \url{https://geodynamicworldbuilder.github.io}. Make sure to specify the # location of the World Builder file in the parameter 'World builder file'. set List of model names = # A comma-separated list of operators that will be used to append the listed # temperature models onto the previous models. If only one operator is # given, the same operator is applied to all models. set List of model operators = add # Select one of the following models: # # `S40RTS perturbation': An initial temperature field in which the # temperature is perturbed following the S20RTS or S40RTS shear wave # velocity model by Ritsema and others, which can be downloaded here # \url{http://www.earth.lsa.umich.edu/~jritsema/research.html}. Information # on the vs model can be found in Ritsema, J., Deuss, A., van Heijst, H.J. # \& Woodhouse, J.H., 2011. S40RTS: a degree-40 shear-velocity model for the # mantle from new Rayleigh wave dispersion, teleseismic traveltime and # normal-mode splitting function measurements, Geophys. J. Int. 184, # 1223-1236.
The scaling between the shear wave perturbation and the density # perturbation can be constant and set by the user with the 'Vs to density # scaling' parameter or depth-dependent and read in from a file. To convert # the density perturbation into a temperature perturbation, the user can specify the 'Thermal expansion coefficient in initial # temperature scaling' parameter. The scaling is as follows: $\delta \ln \rho # (r,\theta,\phi) = \xi \cdot \delta \ln v_s(r,\theta, \phi)$ and $\delta # T(r,\theta,\phi) = - \frac{1}{\alpha} \delta \ln \rho(r,\theta,\phi)$. # $\xi$ is the `vs to density scaling' parameter and $\alpha$ is the # 'Thermal expansion coefficient in initial temperature scaling' parameter. # The temperature perturbation is added to an otherwise constant temperature # (incompressible model) or adiabatic reference profile (compressible # model). If a depth is specified in 'Remove temperature heterogeneity down # to specified depth', there is no temperature perturbation prescribed down # to that depth. # Note the required file format if the vs to density scaling is read in from # a file: The first lines may contain any number of comments if they begin # with '#', but one of these lines needs to contain the number of points in # the reference state as for example '# POINTS: 3'. Following the comment # lines there has to be a single line containing the names of all data # columns, separated by arbitrarily many spaces. Column names are not # allowed to contain spaces. The file can contain unnecessary columns, but # for this plugin it needs to at least provide the columns named `depth' and # `vs\_to\_density'. Note that the data lines in the file need to be sorted # in order of increasing depth from 0 to the maximal depth in the model # domain. Points in the model that are outside of the provided depth range # will be assigned the maximum or minimum depth values, respectively. Points # do not need to be equidistant, but the computation of properties is # optimized in speed if they are. # If the plugin is used in 2D it will use an equatorial slice of the seismic # tomography model. # # `SAVANI perturbation': An initial temperature field in which the # temperature is perturbed following the SAVANI shear wave velocity model by # Auer and others, which can be downloaded here # \url{http://n.ethz.ch/~auerl/savani.tar.bz2}. Information on the vs model # can be found in Auer, L., Boschi, L., Becker, T.W., Nissen-Meyer, T. \& # Giardini, D., 2014. Savani: A variable resolution whole-mantle model of # anisotropic shear velocity variations based on multiple data sets. Journal # of Geophysical Research: Solid Earth 119.4 (2014): 3006-3034. The scaling # between the shear wave perturbation and the density perturbation can be # constant and set by the user with the 'Vs to density scaling' parameter or # depth-dependent and read in from a file. To convert the density perturbation into a temperature perturbation, the user can # specify the 'Thermal expansion coefficient in initial temperature scaling' # parameter. The scaling is as follows: $\delta \ln \rho (r,\theta,\phi) = # \xi \cdot \delta \ln v_s(r,\theta, \phi)$ and $\delta T(r,\theta,\phi) = - # \frac{1}{\alpha} \delta \ln \rho(r,\theta,\phi)$. $\xi$ is the `vs to # density scaling' parameter and $\alpha$ is the 'Thermal expansion # coefficient in initial temperature scaling' parameter.
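# As a brief worked illustration of this scaling (using the default values
# of the two parameters as they appear further down in this file, together
# with a hypothetical shear wave anomaly): for $\xi = 0.25$ and
# $\alpha = 2\times 10^{-5}\; 1/\text{K}$, an anomaly of
# $\delta \ln v_s = +0.01$ (one per cent faster than the reference) gives
# $\delta \ln \rho = 0.25 \cdot 0.01 = 0.0025$ and hence
# $\delta T = -0.0025 / (2\times 10^{-5}\; 1/\text{K}) = -125$ K, i.e. a
# seismically fast region is mapped to a temperature colder than the
# reference profile.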
The temperature # perturbation is added to an otherwise constant temperature (incompressible # model) or adiabatic reference profile (compressible model). If a depth is # specified in 'Remove temperature heterogeneity down to specified depth', # there is no temperature perturbation prescribed down to that depth. # Note the required file format if the vs to density scaling is read in from # a file: The first lines may contain any number of comments if they begin # with '#', but one of these lines needs to contain the number of points in # the reference state as for example '# POINTS: 3'. Following the comment # lines there has to be a single line containing the names of all data # columns, separated by arbitrarily many spaces. Column names are not # allowed to contain spaces. The file can contain unnecessary columns, but # for this plugin it needs to at least provide the columns named `depth' and # `vs\_to\_density'. Note that the data lines in the file need to be sorted # in order of increasing depth from 0 to the maximal depth in the model # domain. Points in the model that are outside of the provided depth range # will be assigned the maximum or minimum depth values, respectively. Points # do not need to be equidistant, but the computation of properties is # optimized in speed if they are. # # `adiabatic': Temperature is prescribed as an adiabatic profile with upper # and lower thermal boundary layers, whose ages are given as input # parameters. # # `adiabatic boundary': An initial temperature condition that allows for # discretizing the model domain into two layers separated by a user-defined # isothermal boundary. The user includes an input ascii data file that is # formatted as 3 columns of `longitude(radians)', `colatitude(radians)', and # `isotherm depth(meters)', where `isotherm depth' represents the depth of # an initial temperature of 1673.15 K (by default). The first lines in the # data file may contain any number of comments if they begin with `#', but # one of these lines needs to contain the number of grid points in each # dimension as for example `# POINTS: 69 121'. Note that the coordinates # need to be sorted in a specific order: the `longitude' coordinate needs to # ascend first, followed by the `colatitude' coordinate in order to assign # the correct data (isotherm depth) to the prescribed coordinates. The # temperature is defined from the surface (273.15 K) to the isotherm depth # (1673.15 K) as a linear gradient. Below the isotherm depth the temperature # increases approximately adiabatically (0.0005 K per meter). This plugin # should work for all geometry models, but is currently only tested for # spherical models. # # `ascii data': Implementation of a model in which the initial temperature # is derived from files containing data in ascii format. Note the required # format of the input data: The first lines may contain any number of # comments if they begin with `#', but one of these lines needs to contain # the number of grid points in each dimension as for example `# POINTS: 3 # 3'. The order of the data columns has to be `x', `y', `Temperature [K]' in # a 2d model and `x', `y', `z', `Temperature [K]' in a 3d model, which # means that there has to be a single column containing the temperature. # Note that the data in the input files need to be sorted in a specific # order: the first coordinate needs to ascend first, followed by the second # and the third at last in order to assign the correct data to the # prescribed coordinates.
If you use a spherical model, then the assumed # grid changes. `x' will be replaced by the radial distance of the point to # the bottom of the model, `y' by the azimuth angle and `z' by the polar # angle measured positive from the north pole. The grid will be assumed to # be a latitude-longitude grid. Note that the order of spherical coordinates # is `r', `phi', `theta' and not `r', `theta', `phi', since this allows for # dimension independent expressions. # # `ascii data layered': Implementation of a model in which the initial # temperature is derived from files containing data in ascii format. Each # file defines a surface on which temperature is defined. Between the # surfaces, the temperatures can be chosen to be constant (with a value # defined by the nearest shallower surface), or linearly interpolated # between surfaces. Note the required format of the input ascii data file: # The first lines may contain any number of comments if they begin with `#', # but one of these lines needs to contain the number of grid points in each # dimension as for example `# POINTS: 3 3'. The order of the data columns # has to be `x', `y', `Temperature [K]' in a 2d model and `x', `y', `z', # `Temperature [K]' in a 3d model; i.e. the last two columns always contain # the position of the isotherm along the vertical direction, and the # temperature at that point. The first column needs to ascend first, # followed by the second in order to assign the correct data to the # prescribed coordinates. If you use a spherical model, then the assumed # grid changes. `x' will be replaced by the azimuth angle and `y' (if 3D) by # the polar angle measured positive from the north pole. The last column # will be the distance of the point from the origin (i.e. radial position). # The grid in this case will be a latitude-longitude grid. Note that the # order of spherical coordinates in 3D is `phi', `theta', `r', `T' and not # `theta', `phi', `r', `T' as this is more consistent with other ASPECT # plugins. Outside of the region defined by the grid, the plugin will use # the value at the edge of the region. # # `ascii profile': Implementation of a model in which the initial # temperature is read from a file that provides these values as a function # of depth. Note the required format of the input data: The first lines may # contain any number of comments if they begin with `#', but one of these # lines needs to contain the number of points in the temperature profile, # for example `# POINTS: 10'. Following the comment lines, there has to be a # single line containing the names of all data columns, separated by # arbitrarily many spaces. Column names are not allowed to contain spaces. # The file can contain unnecessary columns, but for this plugin it needs to # at least provide columns named `depth' and `temperature'. Note that the data # lines in the file need to be sorted in order of increasing depth from 0 to # the maximal depth in the model domain. Points in the model that are # outside of the provided depth range will be assigned the maximum or # minimum depth values, respectively. Points do not need to be equidistant, # but the computation of properties is optimized in speed if they are.
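# For illustration, a minimal data file for the `ascii profile' model that
# follows the format just described might look as sketched below; the depth
# and temperature values are made-up placeholders rather than recommended
# inputs:
#
#   # POINTS: 3
#   depth temperature
#   0.0       273.0
#   1.45e6    1600.0
#   2.89e6    2500.0
#
# The `depth' column increases from zero to the maximal depth of the model
# domain, and additional columns may be present but are not used by this
# plugin.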
# # `conductive harmonic perturbation': An initial temperature field in which # the temperature is perturbed following a harmonic function (spherical # harmonic or sine depending on geometry and dimension) in lateral and # radial direction from an otherwise constant temperature (incompressible # model) or adiabatic reference profile (compressible model). # # `continental geotherm': This is a temperature initial condition that # computes a continental geotherm based on the solution of the steady-state # conductive equation $k\frac{d^2 T}{dy^2}+\rho H = 0$ as described in e.g. # Turcotte and Schubert, Ch. 4.6, or Chapman (1986). As boundary conditions, # we take the surface temperature and the temperature of the # Lithosphere-Asthenosphere Boundary (LAB). # The geotherm is computed for a homogeneous lithosphere composed of an # upper crust, lower crust and mantle layer. The crustal layers are assumed # to have a constant radioactive heating, and all layers are assumed to have # a constant thermal conductivity. Layer thicknesses, surface temperature # and LAB temperature should be specified by the user. For consistency, the # density, heat production and thermal conductivity of each layer are read # from the visco plastic material model and the compositional heating model. # # For any depths below the depth of the LAB, an unrealistically high # temperature is returned, such that this plugin can be combined with # another temperature plugin through the 'minimum' operator. # Note that the current implementation only works for a 3-layer lithosphere, # even though in principle the heat conduction equation can be solved for # any number of layers. The naming of the compositional fields that # represent the layers is also very specific, namely `upper\_crust', # `lower\_crust', and `lithospheric\_mantle'. # Make sure the top and bottom temperatures of the lithosphere agree with # temperatures set in for example the temperature boundary conditions. # # `function': Specify the initial temperature in terms of an explicit # formula. The format of these functions follows the syntax understood by # the muparser library, see Section~\ref{sec:muparser-format}. # # `harmonic perturbation': An initial temperature field in which the # temperature is perturbed following a harmonic function (spherical harmonic # or sine depending on geometry and dimension) in lateral and radial # direction from an otherwise constant temperature (incompressible model) or # adiabatic reference profile (compressible model). # # `inclusion shape perturbation': An initial temperature field in which # there is an inclusion in a constant-temperature box field. The size, # shape, gradient, position, and temperature of the inclusion are defined by # parameters. # # `lithosphere mask': Implementation of a model in which the initial # temperature is set to a specified lithosphere temperature above the # lithosphere-asthenosphere boundary (specified by an ascii file or maximum # lithosphere depth value). Below this the initial temperature is set to # NaN. Note the required format of the input data file: The first lines may # contain any number of comments if they begin with `#', but one of # these lines needs to contain the number of grid points in each dimension # as for example `# POINTS: 3 3'. For a spherical model, the order of the # data columns has to be 'phi', 'theta', 'depth (m)', where phi is the # azimuth angle and theta is the polar angle measured positive from the # north pole.
This plug-in can be combined with another using the 'replace # if valid' operator. # # `mandelbox': Fractal-shaped temperature field. # # `patch on S40RTS': Implementation of a model in which the initial # temperature is derived from a file containing shear wave velocity # perturbations in ascii format (e.g. a high resolution upper mantle # tomography) combined with S40RTS. Note the required format of the input # ascii data: The first lines may contain any number of comments if # they begin with '#', but one of these lines needs to contain the number of # grid points in each dimension as for example '# POINTS: 3 3 3'. The order # of the data columns has to be `x', `y', `z', 'Vs Perturbation' in a 3d # model, which means that there has to be a single column containing the # shear wave velocity perturbation. Note that the data in the input files need to be sorted in a # specific order: the first coordinate needs to ascend first, followed by # the second and the third at last in order to assign the correct data to # the prescribed coordinates. In the spherical model data will be handled as # Cartesian, however, `x' will be replaced by the radial distance of the # point to the bottom of the model, `y' by the azimuth angle and `z' by the # polar angle measured positive from the north pole. The grid will be # assumed to be a latitude-longitude grid. Note that the order of spherical # coordinates is `r', `phi', `theta' and not `r', `theta', `phi', since this # allows for dimension independent expressions. See S40RTS documentation for # details on input parameters in the S40RTS perturbation subsection. The # boundary between the two tomography models is smoothed using a depth # weighted combination of Vs values within the region of smoothing. # # `perturbed box': An initial temperature field in which the temperature is # perturbed slightly from an otherwise constant value equal to one. The # perturbation is chosen in such a way that the initial temperature is # equal to one along the entire boundary. # # `polar box': An initial temperature field in which the temperature is # perturbed slightly from an otherwise constant value equal to one. The # perturbation is such that there are two poles on opposing corners of the # box. # # `spherical gaussian perturbation': An initial temperature field in which # the temperature is perturbed by a single Gaussian added to an otherwise # spherically symmetric state. Additional parameters are read from the # parameter file in subsection 'Spherical gaussian perturbation'. # # `spherical hexagonal perturbation': An initial temperature field in which # the temperature is perturbed following an $N$-fold pattern in a specified # direction from an otherwise spherically symmetric state. The class's name # comes from previous versions when the only option was $N=6$. # # `world builder': Specify the initial temperature through the World # Builder. More information on the World Builder can be found at # \url{https://geodynamicworldbuilder.github.io}. Make sure to specify the # location of the World Builder file in the parameter 'World builder file'. # # \textbf{Warning}: This parameter provides an old and deprecated way of # specifying initial temperature models and shouldn't be used. Please use # 'List of model names' instead. set Model name = harmonic perturbation # default: unspecified subsection Adiabatic # The age of the lower thermal boundary layer, used for the calculation of # the half-space cooling model temperature.
Units: years if the 'Use years # in output instead of seconds' parameter is set; seconds otherwise. set Age bottom boundary layer = 0. # The age of the upper thermal boundary layer, used for the calculation of # the half-space cooling model temperature. Units: years if the 'Use years # in output instead of seconds' parameter is set; seconds otherwise. set Age top boundary layer = 0. # The amplitude (in K) of the initial spherical temperature perturbation # at the bottom of the model domain. This perturbation will be added to # the adiabatic temperature profile, but not to the bottom thermal # boundary layer. Instead, the maximum of the perturbation and the bottom # boundary layer temperature will be used. set Amplitude = 0. # Where the initial temperature perturbation should be placed. If `center' # is given, then the perturbation will be centered along a `midpoint' of # some sort of the bottom boundary. For example, in the case of a box # geometry, this is the center of the bottom face; in the case of a # spherical shell geometry, it is along the inner surface halfway between # the bounding radial lines. set Position = center # The radius (in m) of the initial spherical temperature perturbation at # the bottom of the model domain. set Radius = 0. # If this value is larger than 0, the initial temperature profile will not # be adiabatic, but subadiabatic. This value gives the maximal deviation # from adiabaticity. Set to 0 for an adiabatic temperature profile. Units: # $\si{K}$. # # The function object in the Function subsection represents the # compositional fields that will be used as a reference profile for # calculating the thermal diffusivity. This function is one-dimensional # and depends only on depth. The format of this function follows the # syntax understood by the muparser library, see # Section~\ref{sec:muparser-format}. set Subadiabaticity = 0. subsection Function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric # value everywhere the constant appears. These values can be defined # using this parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or # multiplication, as well as all of the common functions such as `sin' # or `cos'. In addition, it may contain expressions like `if(x>0, 1, # -1)' where the expression evaluates to the second argument if the # first argument is true, and to the third argument otherwise. For a # full overview of possible expressions accepted see the documentation # of the muparser library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' # (in 3d) for spatial coordinates and `t' for time.
You can then use # these variable names in your function expression and they will be # replaced by the values of these variables at which the function is # currently evaluated. However, you can also choose a different set of # names for the independent variables at which to evaluate your function # expression. For example, if you work in spherical coordinates, you may # wish to set this input parameter to `r,phi,theta,t' and then use these # variable names in your function expression. set Variable names = x,t end end subsection Adiabatic boundary # The value of the adiabatic temperature gradient. Units: $K m^{-1}$. set Adiabatic temperature gradient = 0.0005 # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/initial-temperature/adiabatic-boundary/ # The file name of the model data. Provide file in format: (Velocity file # name).\%s\%d where \%s is a string specifying the boundary of the model # according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = adiabatic_boundary.txt # The value of the isothermal boundary temperature. Units: $\si{K}$. set Isotherm temperature = 1673.15 # Scalar factor, which is applied to the model data. You might want to use # this to scale the input to a reference model. Another way to use this # factor is to convert units of the input files. For instance, if you # provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. # The value of the surface temperature. Units: $\si{K}$. set Surface temperature = 273.15 end subsection Ascii data model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/initial-temperature/ascii-data/test/ # The file name of the model data. Provide file in format: (Velocity file # name).\%s\%d where \%s is a string specifying the boundary of the model # according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = initial_isotherm_500K_box_3d.txt # The file names of the model data (comma separated). set Data file names = initial_isotherm_500K_box_3d.txt # Method to interpolate between layer boundaries. Select from piecewise # constant or linear. Piecewise constant takes the value from the nearest # layer boundary above the data point. The linear option interpolates # linearly between layer boundaries. Above and below the domain given by # the layer boundaries, the values are given by the top and bottom layer # boundary.
set Interpolation scheme = linear # Scalar factor, which is applied to the model data. You might want to use # this to scale the input to a reference model. Another way to use this # factor is to convert units of the input files. For instance, if you # provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end subsection Ascii profile # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/initial-temperature/ascii-profile/tests/ # The file name of the model data. Provide file in format: (Velocity file # name).\%s\%d where \%s is a string specifying the boundary of the model # according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = simple_test.txt # Scalar factor, which is applied to the model data. You might want to use # this to scale the input to a reference model. Another way to use this # factor is to convert units of the input files. For instance, if you # provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end subsection Conductive harmonic perturbation set Additional lateral wave number one = 3 set Additional lateral wave number two = 3 # Doubled first lateral wave number of the conductive harmonic # perturbation. Equals the spherical harmonic degree in 3D spherical # shells. In all other cases one equals half of a sine period over the # model domain. This allows for single up-/downswings. Negative numbers # reverse the sign of the perturbation but are not allowed for the # spherical harmonic case. set Lateral wave number one = 3 # Doubled second lateral wave number of the conductive harmonic # perturbation. Equals the spherical harmonic order in 3D spherical # shells. In all other cases one equals half of a sine period over the # model domain. This allows for single up-/downswings. Negative numbers # reverse the sign of the perturbation. set Lateral wave number two = 2 # The magnitude of the conductive temperature profile. set Magnitude of conductive temperature profile = 1600.0 set Magnitude of the first wave number component = 1.0 set Magnitude of the second wave number component = 1.0 set Prefactor of additional lateral wave number one = 1.0 set Prefactor of additional lateral wave number two = 1.0 set Prefactor of lateral wave number one = 1.0 set Prefactor of lateral wave number two = 1.0 set Use double harmonic perturbations = false # Doubled radial wave number of the conductive harmonic perturbation. One # equals half of a sine period over the model domain. This allows for # single up-/downswings. Negative numbers reverse the sign of the # perturbation. set Vertical wave number = 1 end subsection Continental geotherm # List of the 3 thicknesses of the lithospheric layers 'upper\_crust', # 'lower\_crust' and 'mantle\_lithosphere'. If only one thickness is # given, then the same thickness is used for all layers. Units: $m$ set Layer thicknesses = 30000. # The value of the isotherm that is assumed at the # Lithosphere-Asthenosphere boundary. Units: $\si{K}$. 
set Lithosphere-Asthenosphere boundary isotherm = 1673.15 # The value of the surface temperature. Units: $\si{K}$. set Surface temperature = 273.15 end subsection Function # A selection that determines the assumed coordinate system for the # function variables. Allowed values are `cartesian', `spherical', and # `depth'. `spherical' coordinates are interpreted as r,phi or r,phi,theta # in 2D/3D respectively with theta being the polar angle. `depth' will # create a function, in which only the first parameter is non-zero, which # is interpreted to be the depth of the point. set Coordinate system = cartesian # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection Harmonic perturbation # Doubled first lateral wave number of the harmonic perturbation. Equals # the spherical harmonic degree in 3D spherical shells. In all other cases # one equals half of a sine period over the model domain. This allows for # single up-/downswings. Negative numbers reverse the sign of the # perturbation but are not allowed for the spherical harmonic case. set Lateral wave number one = 8 # default: 3 # Doubled second lateral wave number of the harmonic perturbation. Equals # the spherical harmonic order in 3D spherical shells. In all other cases # one equals half of a sine period over the model domain. This allows for # single up-/downswings. Negative numbers reverse the sign of the # perturbation. 
set Lateral wave number two = 4 # default: 2 # The magnitude of the Harmonic perturbation. set Magnitude = 0.02 # default: 1.0 # The reference temperature that is perturbed by the harmonic function. # Only used in incompressible models. set Reference temperature = 1600.0 # Doubled radial wave number of the harmonic perturbation. One equals # half of a sine period over the model domain. This allows for single # up-/downswings. Negative numbers reverse the sign of the perturbation. set Vertical wave number = 1 end subsection Inclusion shape perturbation # The background temperature for the temperature field. set Ambient temperature = 1.0 # The X coordinate for the center of the shape. set Center X = 0.5 # The Y coordinate for the center of the shape. set Center Y = 0.5 # The Z coordinate for the center of the shape. This is only necessary for # three-dimensional fields. set Center Z = 0.5 # The gradient of the inclusion to be generated. set Inclusion gradient = constant # The shape of the inclusion to be generated. set Inclusion shape = circle # The temperature of the inclusion shape. This is only the true # temperature in the case of the constant gradient. In all other cases, it # gives one endpoint of the temperature gradient for the shape. set Inclusion temperature = 0.0 # The radius of the inclusion to be generated. For shapes with no radius # (e.g. square), this will be the width, and for shapes with no width, # this gives a general guideline for the size of the shape. set Shape radius = 1.0 end subsection Lithosphere Mask # The path to the LAB depth data file. set Data directory = $ASPECT_SOURCE_DIR/data/initial-temperature/lithosphere-mask/ # Method that is used to specify the depth of the # lithosphere-asthenosphere boundary. set Depth specification method = Value # File from which the lithosphere-asthenosphere boundary depth data is # read. set LAB depth filename = LAB_CAM2016.txt # The initial temperature within the lithosphere, applied above the maximum # lithosphere depth. set Lithosphere temperature = 1600. # Units: m. The maximum depth of the lithosphere. The initial temperature # will be set to NaN below this depth. set Maximum lithosphere depth = 200000.0 end subsection Patch on S40RTS # The maximum depth of the Vs ascii grid. The model will read in Vs from # S40RTS below this depth. set Maximum grid depth = 700000.0 # This will set the heterogeneity prescribed by the Vs ascii grid and # S40RTS to zero down to the specified depth (in meters). Note that your # resolution has to be adequate to capture this cutoff. For example if you # specify a depth of 660km, but your closest spherical depth layers are # only at 500km and 750km (due to a coarse resolution) it will only zero # out heterogeneities down to 500km. Similar caution has to be taken when # using adaptive meshing. set Remove temperature heterogeneity down to specified depth = -1.7976931348623157e+308 # The depth range (above maximum grid depth) over which to smooth. The # boundary is smoothed using a depth weighted combination of Vs values # from the ascii grid and S40RTS at each point in the region of # smoothing. set Smoothing length scale = 200000.0 subsection Ascii data model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which # the ASPECT source files were located when ASPECT was compiled.
This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/initial-temperature/patch-on-S40RTS/test/ # The file name of the model data. Provide file in format: (Velocity # file name).\%s\%d where \%s is a string specifying the boundary of the # model according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = upper_shell_3d.txt # Scalar factor, which is applied to the model data. You might want to # use this to scale the input to a reference model. Another way to use # this factor is to convert units of the input files. For instance, if # you provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end end subsection S40RTS perturbation # The path to the model data. set Data directory = $ASPECT_SOURCE_DIR/data/initial-temperature/S40RTS/ # The file name of the spherical harmonics coefficients from Ritsema et # al. set Initial condition file name = S40RTS.sph # The maximum order the user specifies when reading the data file of # spherical harmonic coefficients, which must be smaller than the maximum # order stored in the data file. This parameter will be used only if 'Specify # a lower maximum order' is set to true. set Maximum order = 20 # The reference temperature that is perturbed by the spherical harmonic # functions. Only used in incompressible models. set Reference temperature = 1600.0 # Option to remove the degree zero component from the perturbation, which # will ensure that the laterally averaged temperature for a fixed depth is # equal to the background temperature. set Remove degree 0 from perturbation = true # This will set the heterogeneity prescribed by S20RTS or S40RTS to zero # down to the specified depth (in meters). Note that your resolution has # to be adequate to capture this cutoff. For example if you specify a # depth of 660km, but your closest spherical depth layers are only at # 500km and 750km (due to a coarse resolution) it will only zero out # heterogeneities down to 500km. Similar caution has to be taken when # using adaptive meshing. set Remove temperature heterogeneity down to specified depth = -1.7976931348623157e+308 # Option to use a lower maximum order when reading the data file of # spherical harmonic coefficients. This is mostly useful for faster # tests or when the user only wants to see the spherical harmonic pattern # up to a certain order. set Specify a lower maximum order = false # The file name of the spline knot locations from Ritsema et al. set Spline knots depth file name = Spline_knots.txt # The value of the thermal expansion coefficient $\beta$. Units: $1/K$. set Thermal expansion coefficient in initial temperature scaling = 2e-5 # Option to take the thermal expansion coefficient from the material model # instead of from what is specified in this section. set Use thermal expansion coefficient from material model = false # This parameter specifies how the perturbation in shear wave velocity as # prescribed by S20RTS or S40RTS is scaled into a density perturbation. # See the general description of this model for more detailed # information. set Vs to density scaling = 0.25 # Method that is used to specify how the vs-to-density scaling varies with # depth. set Vs to density scaling method = constant subsection Ascii data vs to density model # The name of a directory that contains the model data.
This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which # the ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/initial-temperature/S40RTS/ # The file name of the model data. Provide file in format: (Velocity # file name).\%s\%d where \%s is a string specifying the boundary of the # model according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = vs_to_density_Steinberger.txt # Scalar factor, which is applied to the model data. You might want to # use this to scale the input to a reference model. Another way to use # this factor is to convert units of the input files. For instance, if # you provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end end subsection SAVANI perturbation # The path to the model data. set Data directory = $ASPECT_SOURCE_DIR/data/initial-temperature/SAVANI/ # The file name of the spherical harmonics coefficients from Auer et al. set Initial condition file name = savani.dlnvs.60.m.ab # The maximum order the user specifies when reading the data file of # spherical harmonic coefficients, which must be smaller than the maximum # order stored in the data file. This parameter will be used only if 'Specify # a lower maximum order' is set to true. set Maximum order = 20 # The reference temperature that is perturbed by the spherical harmonic # functions. Only used in incompressible models. set Reference temperature = 1600.0 # Option to remove the degree zero component from the perturbation, which # will ensure that the laterally averaged temperature for a fixed depth is # equal to the background temperature. set Remove degree 0 from perturbation = true # This will set the heterogeneity prescribed by SAVANI to zero down to the # specified depth (in meters). Note that your resolution has to be # adequate to capture this cutoff. For example if you specify a depth of # 660km, but your closest spherical depth layers are only at 500km and # 750km (due to a coarse resolution) it will only zero out heterogeneities # down to 500km. Similar caution has to be taken when using adaptive # meshing. set Remove temperature heterogeneity down to specified depth = -1.7976931348623157e+308 # Option to use a lower maximum order when reading the data file of # spherical harmonic coefficients. This is mostly useful for faster # tests or when the user only wants to see the spherical harmonic pattern # up to a certain order. set Specify a lower maximum order = false # The file name of the spline knots taken from the 28 spherical layers of # the SAVANI tomography model. set Spline knots depth file name = Spline_knots.txt # The value of the thermal expansion coefficient $\beta$. Units: $1/K$. set Thermal expansion coefficient in initial temperature scaling = 2e-5 # Option to take the thermal expansion coefficient from the material model # instead of from what is specified in this section. set Use thermal expansion coefficient from material model = false # This parameter specifies how the perturbation in shear wave velocity as # prescribed by SAVANI is scaled into a density perturbation.
See the # general description of this model for more detailed information. set Vs to density scaling = 0.25 # Method that is used to specify how the vs-to-density scaling varies with # depth. set Vs to density scaling method = constant subsection Ascii data vs to density model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which # the ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/initial-temperature/S40RTS/ # The file name of the model data. Provide file in format: (Velocity # file name).\%s\%d where \%s is a string specifying the boundary of the # model according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = vs_to_density_Steinberger.txt # Scalar factor, which is applied to the model data. You might want to # use this to scale the input to a reference model. Another way to use # this factor is to convert units of the input files. For instance, if # you provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end end subsection Spherical gaussian perturbation # The amplitude of the perturbation. set Amplitude = 0.01 # The angle where the center of the perturbation is placed. set Angle = 0. # The file from which the initial geotherm table is to be read. The format # of the file is defined by what is read in # source/initial\_conditions/spherical\_shell.cc. set Filename for initial geotherm table = initial-geotherm-table # The non-dimensional radial distance where the center of the perturbation # is placed. set Non-dimensional depth = 0.7 # The standard deviation of the Gaussian perturbation. set Sigma = 0.2 # The sign of the perturbation. set Sign = 1. end subsection Spherical hexagonal perturbation # The number of convection cells with which to perturb the system. set Angular mode = 6 # Amount of clockwise rotation in degrees to apply to the perturbations. # Default is set to -45 in order to provide backwards compatibility. set Rotation offset = -45. end end subsection Material model # Whether or not (and in the first case, how) to do any averaging of # material model output data when constructing the linear systems for # velocity/pressure, temperature, and compositions in each time step, as # well as their corresponding preconditioners. # # Possible choices: none|arithmetic average|harmonic average|geometric # average|pick largest|project to Q1|log average|harmonic average only # viscosity|project to Q1 only viscosity # # The process of averaging, and where it may be used, is discussed in more # detail in Section~\ref{sec:sinker-with-averaging}. # # More averaging schemes are available in the averaging material model. This # material model is a ``compositing material model'' which can be used in # combination with other material models. set Material averaging = none # The name of the material model to be used in this simulation. There are # many material models you can choose from, as listed below. They generally # fall into two categories: (i) models that implement a particular case of # material behavior, (ii) models that modify other models in some way.
We # sometimes call the latter ``compositing models''. An example of a # compositing model is the ``depth dependent'' model below, in that it takes # another, freely selectable model as its base and then modifies that model's # output in some way. # # You can select one of the following models: # # `Steinberger': This material model looks up the viscosity from the tables # that correspond to the paper of Steinberger and Calderwood 2006 (``Models # of large-scale viscous flow in the Earth's mantle with constraints from # mineral physics and surface observations'', Geophys. J. Int., 167, # 1461-1481, \url{http://dx.doi.org/10.1111/j.1365-246X.2006.03131.x}) and # material data from a database generated by the thermodynamics code # \texttt{Perplex}, see \url{http://www.perplex.ethz.ch/}. The default # example data builds upon the thermodynamic database by Stixrude 2011 and # assumes a pyrolitic composition by Ringwood 1988 but is easily replaceable # by other data files. # # `ascii reference profile': A material model that reads in a reference # state for density, thermal expansivity, compressibility and specific heat # from a text file. # Note the required format of the input data: The first lines may contain # any number of comments if they begin with `#', but one of these lines # needs to contain the number of points in the reference state as for # example `# POINTS: 3'. Following the comment lines there has to be a # single line containing the names of all data columns, separated by # arbitrarily many spaces. Column names are not allowed to contain spaces. # The file can contain unnecessary columns, but for this plugin it needs to # at least provide the columns named `density', `thermal\_expansivity', # `specific\_heat', and `compressibility'. Note that the data lines in the # file need to be sorted in order of increasing depth from 0 to the maximal # depth in the model domain. Points in the model that are outside of the # provided depth range will be assigned the maximum or minimum depth values, # respectively. Points do not need to be equidistant, but the computation of # properties is optimized in speed if they are. # # The viscosity $\eta$ is computed as \begin{equation}\eta(z,T) = \eta_r(z) # \eta_0 \exp\left(-A \frac{T - # T_{\text{adi}}}{T_{\text{adi}}}\right),\end{equation}where $\eta_r(z)$ is # the depth-dependence, which is a piecewise constant function computed # according to the list of ``Viscosity prefactors'' and ``Transition # depths'', $\eta_0$ is the reference viscosity specified by the parameter # ``Viscosity'' and $A$ describes the dependence on temperature and # corresponds to the parameter ``Thermal viscosity exponent''. # # `averaging': The ``averaging'' Material model applies an averaging of the # quadrature points within a cell. The values to average are supplied by any # of the other available material models. In other words, it is a # ``compositing material model''. Parameters related to the average model # are read from a subsection ``Material model/Averaging''. # # The user must specify a ``Base model'' from which material properties are # derived. Furthermore, an averaging operation must be selected, where the # choice should be from the list none|arithmetic average|harmonic # average|geometric average|pick largest|log average|NWD arithmetic # average|NWD harmonic average|NWD geometric average. # # NWD stands for Normalized Weighted Distance.
The models with this in front # of their name work with a weighted average, which means each quadrature # point requires an individual weight. The weight is determined by the # distance, where the exact relation is determined by a bell shaped curve. A # bell shaped curve is a continuous function which is one at its maximum and # exactly zero at and beyond its limit. This bell shaped curve is spanned # around each quadrature point to determine the weighting map for each # quadrature point. The bell shape used comes from Lucy (1977). The distance # is normalized so the largest distance becomes one. This means that if the # variable ``Bell shape limit'' is exactly one, the farthest quadrature # point is just on the limit and its weight will be exactly zero. In this # plugin the cutoff is implemented as strictly larger than the limit, rather # than larger than or equal to it, to ensure that the quadrature point at # distance zero is always included. # # `compositing': The ``compositing'' Material model selects material model # properties from a given set of other material models, and is intended to # make mixing different material models easier. # # Specifically, this material model works by allowing the user to specify the name of # another material model for each coefficient that material models are asked # for (such as the viscosity, density, etc.). Whenever the material model is # asked for the values of coefficients, it then evaluates all of the ``base # models'' that were listed for the various coefficients, and copies the # values returned by these base models into the output structure. # # The implementation of this material model is somewhat expensive because it # has to evaluate all material coefficients of all underlying material # models. Consequently, if performance of assembly and postprocessing is # important, then implementing a separate material model is a better choice # than using this material model. # # `composition reaction': A material model that behaves in the same way as # the simple material model, but includes two compositional fields and a # reaction between them. Above a depth given in the input file, the first # field gets converted to the second field. # # `depth dependent': The ``depth dependent'' Material model applies a # depth-dependent scaling to any of the other available material models. In # other words, it is a ``compositing material model''. # # Parameters related to the depth dependent model are read from a subsection # ``Material model/Depth dependent model''. The user must specify a ``Base # model'' from which material properties are derived. Currently the depth # dependent model only allows depth dependence of viscosity - other material # properties are taken from the ``Base model''. Viscosity $\eta$ at depth # $z$ is calculated according to:\begin{equation}\eta(z,p,T,X,...) = \eta(z) # \eta_b(p,T,X,...)/\eta_{rb}\end{equation}where $\eta(z)$ is the # depth-dependence specified by the depth dependent model, # $\eta_b(p,T,X,...)$ is the viscosity calculated from the base model, and # $\eta_{rb}$ is the reference viscosity of the ``Base model''. In addition # to the specification of the ``Base model'', the user must specify the # method to be used to calculate the depth-dependent viscosity $\eta(z)$ as # ``Material model/Depth dependent model/Depth dependence method'', which # can be chosen among ``None|Function|File|List''.
# associated parameters are as follows:
#
# ``Function'': read a user-specified parsed function from the input file in
# a subsection ``Material model/Depth dependent model/Viscosity depth
# function''. By default, this function is uniformly equal to 1.0e21.
# Specifying a function that returns a value less than or equal to 0.0
# anywhere in the model domain will produce an error.
#
# ``File'': read a user-specified file containing viscosity values at
# specified depths. The file containing depth-dependent viscosities is read
# from a directory specified by the user as ``Material model/Depth dependent
# model/Data directory'', from a file with name specified as ``Material
# model/Depth dependent model/Viscosity depth file''. The format of this
# file is ascii text and contains two columns with one header line:
#
# example Viscosity depth file:\\Depth (m) Viscosity
# (Pa-s)\\0.0000000e+00 1.0000000e+21\\6.7000000e+05 1.0000000e+22\\
#
# Viscosity is interpolated from this file using linear interpolation.
#
# ``None'': no depth-dependence. Viscosity is taken directly from the ``Base
# model''.
#
# ``List'': read a comma-separated list of depth values corresponding to
# the maximum depths of layers having constant depth-dependence $\eta(z)$.
# The layers must be specified in order of increasing depth, and the last
# layer in the list must have a depth greater than or equal to the maximal
# depth of the model. The list of layer depths is specified as ``Material
# model/Depth dependent model/Depth list'' and the corresponding list of
# layer viscosities is specified as ``Material model/Depth dependent
# model/Viscosity list''.
#
# `diffusion dislocation': An implementation of a viscous rheology including
# diffusion and dislocation creep. Compositional fields can each be assigned
# individual activation energies, reference densities, thermal
# expansivities, and stress exponents. The effective viscosity is defined as
#
#
# \[\eta_{\text{eff}} = \left(\frac{1}{\eta_{\text{eff}}^\text{diff}}+
# \frac{1}{\eta_{\text{eff}}^\text{dis}}\right)^{-1}\] where
# \[\eta_{\text{i}} = \frac{1}{2} A^{-\frac{1}{n_i}} d^\frac{m_i}{n_i}
# \dot{\varepsilon_i}^{\frac{1-n_i}{n_i}} \exp\left(\frac{E_i^\ast +
# PV_i^\ast}{n_iRT}\right)\]
#
# where $d$ is grain size, $i$ corresponds to diffusion or dislocation
# creep, $\dot{\varepsilon}$ is the square root of the second invariant of
# the strain rate tensor, $R$ is the gas constant, $T$ is temperature, and
# $P$ is pressure. $A_i$ are prefactors, $n_i$ and $m_i$ are stress and
# grain size exponents, $E_i$ are the activation energies, and $V_i$ are the
# activation volumes.
#
# This form of the viscosity equation is commonly used in geodynamic
# simulations. See, for example, Billen and Hirth (2007), G3, 8, Q08012.
# Significantly, other studies may use slightly different forms of the
# viscosity equation leading to variations in how specific terms are defined
# or combined. For example, the grain size exponent should always be
# positive in the diffusion viscosity equation used here, while other
# studies place the grain size term in the denominator and invert the sign
# of the grain size exponent. When examining previous work, one should
# carefully check how the viscous prefactor and grain size terms are
# defined.
#
# The ratio of diffusion to dislocation strain rate is found by Newton's
# method, iterating to find the stress which satisfies the above equations.
# The values for the components of this formula and additional parameters
# are read from the parameter file in subsection 'Material
# model/DiffusionDislocation'.
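#
# As a purely illustrative example of the composite form above (the numbers
# are arbitrary and are not defaults of this model): if, at some quadrature
# point, the two mechanisms individually yield
# $\eta_{\text{eff}}^\text{diff} = 10^{21}\, Pa \, s$ and
# $\eta_{\text{eff}}^\text{dis} = 10^{20}\, Pa \, s$, then
# $\eta_{\text{eff}} = \left(10^{-21} + 10^{-20}\right)^{-1} \approx
# 9.1\times 10^{19}\, Pa \, s$, so the composite viscosity is always somewhat
# smaller than that of the weaker of the two mechanisms.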
#
# `drucker prager': A material model that has constant values for all
# coefficients but the density and viscosity. The defaults for all
# coefficients are chosen to be similar to what is believed to be correct
# for Earth's mantle. All of the values that define this model are read from
# a section ``Material model/Drucker Prager'' in the input file, see
# Section~\ref{parameters:Material_20model/Drucker_20Prager}. Note that the
# model does not take into account any dependencies of material properties
# on compositional fields.
#
# The viscosity is computed according to the Drucker Prager frictional
# plasticity criterion (non-associative) based on a user-defined internal
# friction angle $\phi$ and cohesion $C$. In 3D: $\sigma_y = \frac{6 C
# \cos(\phi)}{\sqrt{3} (3+\sin(\phi))} + \frac{2 P \sin(\phi)}{\sqrt{3}
# (3+\sin(\phi))}$, where $P$ is the pressure. See for example Zienkiewicz,
# O. C., Humpheson, C. and Lewis, R. W. (1975), G\'{e}otechnique 25, No. 4,
# 671-689. With this formulation we circumscribe instead of inscribe the
# Mohr Coulomb yield surface. In 2D the Drucker Prager yield surface is the
# same as the Mohr Coulomb surface: $\sigma_y = P \sin(\phi) + C
# \cos(\phi)$. Note that in 2D for $\phi=0$, these criteria revert to the
# von Mises criterion (no pressure dependence). See for example Thieulot, C.
# (2011), PEPI 188, 47-68.
#
# Note that we enforce the pressure to be positive to prevent negative yield
# strengths and viscosities.
#
# We then use the computed yield strength to scale back the viscosity on to
# the yield surface using the Viscosity Rescaling Method described in
# Kachanov, L. M. (2004), Fundamentals of the Theory of Plasticity, Dover
# Publications, Inc. (Not Radial Return.) A similar implementation can be
# found in GALE (https://geodynamics.org/cig/software/gale/gale-manual.pdf).
#
#
# To avoid numerically unfavourably large (or even negative) viscosity
# ranges, we cut off the viscosity with a user-defined minimum and maximum
# viscosity: $\eta_{\text{eff}} = \frac{1}{\frac{1}{\eta_{\text{min}} +
# \eta}+ \frac{1}{\eta_{\text{max}}}}$.
#
# Note that this model uses the formulation that assumes an incompressible
# medium despite the fact that the density follows the law
# $\rho(T)=\rho_0(1-\beta(T-T_{\text{ref}}))$.
#
# `dynamic friction': This model is for use with an arbitrary number of
# compositional fields, where each field represents a rock type which can
# have completely different properties from the others. Each rock type
# itself has constant material properties, with the exception of viscosity
# which is modified according to a Drucker-Prager yield criterion. Unlike
# the drucker prager or visco plastic material models, the angle of internal
# friction is a function of velocity. This relationship is similar to
# rate-and-state friction constitutive relationships, which are applicable
# to the strength of rocks during earthquakes. The formulation used here is
# derived from van Dinther et al. 2013, JGR. Each compositional field is
# interpreted as a volume fraction. If the sum of the fields is greater than
# one, they are renormalized. If it is less than one, material properties
# for ``background material'' make up the rest. When more than one field is
# present, the material properties are averaged arithmetically. An exception
# is the viscosity, where the averaging should make more of a difference. For this,
# the user selects between arithmetic, harmonic, geometric, or maximum
# composition averaging.
#
# `grain size': A material model that relies on compositional fields that
# correspond to the average grain sizes of a mineral phase and source terms
# that determine the grain size evolution in terms of the strain rate,
# temperature, phase transitions, and the creep regime. This material model
# only works if a compositional field named 'grain_size' is present. In the
# diffusion creep regime, the viscosity depends on this grain size field. We
# use the grain size evolution laws described in Behn et al., 2009.
# Implications of grain size evolution on the seismic structure of the
# oceanic upper mantle, Earth Planet. Sci. Letters, 282, 178–189. Other
# material parameters are either prescribed similar to the 'simple' material
# model, or read from data files that were generated by the Perplex or
# Hefesto software. This material model is described in more detail in
# Dannberg, J., Z. Eilon, U. Faul, R. Gassmoeller, P. Moulik, and R. Myhill
# (2017), The importance of grain size to mantle dynamics and seismological
# observations, Geochem. Geophys. Geosyst., 18, 3034–3061,
# doi:10.1002/2017GC006944.
#
# `latent heat': A material model that includes phase transitions and the
# possibility that latent heat is released or absorbed when material crosses
# one of the phase transitions of up to two different materials
# (compositional fields). This model implements a standard approximation of
# the latent heat terms following Christensen \& Yuen, 1985. The change of
# entropy is calculated as $\Delta S = \gamma \frac{\Delta\rho}{\rho^2}$ with
# the Clapeyron slope $\gamma$ and the density change $\Delta\rho$ of the
# phase transition being input parameters. The model employs an analytic
# phase function in the form $X=\frac{1}{2} \left( 1 + \tanh \left(
# \frac{\Delta p}{\Delta p_0} \right) \right)$ with $\Delta p = p -
# p_{\text{transition}} - \gamma \left( T - T_{\text{transition}} \right)$
# and $\Delta p_0$ being the pressure difference over the width of the phase
# transition (specified as an input parameter).
#
# `latent heat melt': A material model that includes the latent heat of
# melting for two materials: peridotite and pyroxenite. The melting model
# for peridotite is taken from Katz et al., 2003 (A new parameterization of
# hydrous mantle melting) and the one for pyroxenite from Sobolev et al.,
# 2011 (Linking mantle plumes, large igneous provinces and environmental
# catastrophes). The model assumes a constant entropy change for melting
# 100\% of the material, which can be specified in the input file. The
# partial derivatives of entropy with respect to temperature and pressure
# required for calculating the latent heat consumption are then calculated
# as the product of this constant entropy change and the respective
# derivative of the function that describes the melt fraction. This is
# linearly averaged with respect to the fractions of the two materials
# present. If no compositional fields are specified in the input file, the
# model assumes that the material is peridotite. If compositional fields are
# specified, the model assumes that the first compositional field is the
# fraction of pyroxenite and the rest of the material is peridotite.
#
# Otherwise, this material model has a temperature- and pressure-dependent
# density and viscosity and the density and thermal expansivity depend on
# the melt fraction present. It is possible to extend this model to include
# a melt fraction dependence of all the material parameters by calling the
# function melt_fraction in the calculation of the respective parameter.
# However, melt and solid move with the same velocity and melt extraction is
# not taken into account (batch melting).
#
# `melt global': A material model that implements a simple formulation of
# the material parameters required for the modelling of melt transport,
# including a source term for the porosity according to a simplified linear
# melting model similar to \cite{schmeling2006}:
# $\phi_{\text{equilibrium}} =
# \frac{T-T_{\text{sol}}}{T_{\text{liq}}-T_{\text{sol}}}$
# with $T_{\text{sol}} = T_{\text{sol,0}} + \Delta T_p \, p + \Delta T_c \,
# C$ and
# $T_{\text{liq}} = T_{\text{sol}} + \Delta T_{\text{sol-liq}}$.
#
# `melt simple': A material model that implements a simple formulation of
# the material parameters required for the modelling of melt transport,
# including a source term for the porosity according to the melting model
# for dry peridotite of \cite{KSL2003}. This also includes a computation of
# the latent heat of melting (if the `latent heat' heating model is active).
#
# Most of the material properties are constant, except for the shear
# viscosity $\eta$, the compaction viscosity $\xi$, and the permeability
# $k$, which depend on the porosity; and the solid and melt densities, which
# depend on temperature and pressure:
# $\eta(\phi,T) = \eta_0 e^{\alpha(\phi-\phi_0)} e^{-\beta(T-T_0)/T_0}$,
# $\xi(\phi,T) = \xi_0 \frac{\phi_0}{\phi} e^{-\beta(T-T_0)/T_0}$, $k=k_0
# \phi^n (1-\phi)^m$, $\rho=\rho_0 (1 - \alpha (T - T_{\text{adi}}))
# e^{\kappa p}$.
#
# The model is compressible only if this is specified in the input file, and
# contains compressibility for both solid and melt.
#
# `modified tait': A material model that implements the thermal modified
# Tait equation of state as written in \cite{HP2011}. Constant values are
# used for the thermal conductivity and viscosity. The defaults for all
# coefficients are chosen to be similar to what is believed to be correct
# for Earth's mantle. All of the values that define this model are read from
# a section ``Material model/Modified Tait model'' in the input file, see
# Section~\ref{parameters:Material_20model/Modified_20Tait_20model}.
#
# `multicomponent': This model is for use with an arbitrary number of
# compositional fields, where each field represents a rock type which can
# have completely different properties from the others. However, each rock
# type itself has constant material properties. The value of the
# compositional field is interpreted as a volume fraction. If the sum of the
# fields is greater than one, they are renormalized. If it is less than
# one, material properties for ``background mantle'' make up the rest. When
# more than one field is present, the material properties are averaged
# arithmetically. An exception is the viscosity, where the averaging should
# make more of a difference. For this, the user selects between arithmetic,
# harmonic, geometric, or maximum composition averaging.
#
# `multicomponent compressible': This model is for use with an arbitrary
# number of compositional fields, where each field represents a rock type
# which can have completely different properties from the others. Each rock
# type is described by a self-consistent equation of state. The value of
# the compositional field is interpreted as a volume fraction. If the sum
# of the fields is greater than one, they are renormalized. If it is less
# than one, material properties for ``background mantle'' make up the rest.
# When more than one field is present, the bulk material properties are
# calculated self-consistently. An exception is the viscosity, where there
# is no unique solution to the averaging. For this, the user selects
# between arithmetic, harmonic, geometric, or maximum composition averaging.
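#
# As a purely illustrative example of how much this choice can matter (the
# numbers are arbitrary): for two materials with equal volume fractions and
# viscosities of $10^{21}\, Pa \, s$ and $10^{23}\, Pa \, s$, the arithmetic
# average is about $5\times 10^{22}\, Pa \, s$, the geometric average is
# $10^{22}\, Pa \, s$, and the harmonic average is about $2\times 10^{21}\,
# Pa \, s$, i.e. the averaged viscosity can differ by more than an order of
# magnitude depending on the selected scheme.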
#
# `nondimensional': A material model for nondimensionalized compressible or
# incompressible computations, defined through the Rayleigh number
# $\text{Ra}$ and the dissipation number $\text{Di}$. This model is made to
# be used with the Boussinesq, ALA, or TALA formulation.
#
# The viscosity is defined as \[\eta = \text{Di} / \text{Ra} \cdot \exp(-b
# T' + c z)\] where $T'$ is the temperature variation from the adiabatic
# temperature, $z$ is the depth, $b$ is given by ``Viscosity temperature
# prefactor'', and $c$ by ``Viscosity depth prefactor''. If $\text{Di}$ is
# zero, it will be replaced by 1.0 in $\eta$.
#
# The density is defined as \[\rho = \exp(\text{Di}/\gamma \cdot z) (1.0 -
# \alpha T' + \text{Di} \gamma p'),\] where $\alpha=\text{Di}$ is the
# thermal expansion coefficient, $\gamma$ is the Grueneisen parameter, and
# $p'$ is the pressure variation from the adiabatic pressure. The pressure
# dependent term is not present if ``TALA'' is enabled.
#
# `perplex lookup': A material model that has constant values for viscosity
# and thermal conductivity, and calculates other properties on-the-fly using
# PerpleX meemum. Compositional fields correspond to the individual
# components in the order given in the PerpleX file.
#
# `replace lithosphere viscosity': The ``replace lithosphere viscosity''
# Material model sets viscosity to a prescribed constant above the
# lithosphere-asthenosphere boundary (specified by an ascii file or maximum
# lithosphere depth). Below the lithosphere-asthenosphere boundary the
# viscosity is taken from any of the other available material models. In
# other words, it is a ``compositing material model''.
# Parameters related to the replace lithosphere viscosity model are read
# from a subsection ``Material model/Replace lithosphere viscosity''. The
# user must specify a ``Base model'' from which other material properties
# are derived.
# Note the required format of the input data file: The first lines may
# contain any number of comments if they begin with `#', but one of
# these lines needs to contain the number of grid points in each dimension
# as for example `# POINTS: 3 3'. For a spherical model, the order of the
# data columns has to be 'phi', 'theta', 'depth (m)', where phi is the
# azimuth angle and theta is the polar angle measured positive from the
# north pole.
#
# `simple': A material model that has constant values for all coefficients
# but the density and viscosity. The defaults for all coefficients are
# chosen to be similar to what is believed to be correct for Earth's mantle.
# All of the values that define this model are read from a section
# ``Material model/Simple model'' in the input file, see
# Section~\ref{parameters:Material_20model/Simple_20model}.
#
# This model uses the following set of equations for the two coefficients
# that are non-constant: \begin{align} \eta(p,T,\mathfrak c) &= \tau(T)
# \zeta(\mathfrak c) \eta_0, \\ \rho(p,T,\mathfrak c) &= \left(1-\alpha
# (T-T_0)\right)\rho_0 + \Delta\rho \; c_0,\end{align} where $c_0$ is the
# first component of the compositional vector $\mathfrak c$ if the model
# uses compositional fields, or zero otherwise.
#
# The temperature pre-factor for the viscosity formula above is defined as
# \begin{align} \tau(T) &= H\left(e^{-\beta
# (T-T_0)/T_0}\right),\intertext{with} \qquad\qquad H(x) &= \begin{cases}
# \tau_{\text{min}} & \text{if}\; x<\tau_{\text{min}}, \\
# x & \text{if}\; \tau_{\text{min}}\le x \le \tau_{\text{max}}, \\
# \tau_{\text{max}} & \text{if}\; x>\tau_{\text{max}}, \\
# \end{cases}\end{align} where $x=e^{-\beta (T-T_0)/T_0}$, $\beta$
# corresponds to the input parameter ``Thermal viscosity exponent'', and
# $T_0$ to the parameter ``Reference temperature''. If you set $T_0=0$ in
# the input file, the thermal pre-factor $\tau(T)=1$. The parameters
# $\tau_{\text{min}}$ and $\tau_{\text{max}}$ set the minimum and maximum
# values of the temperature pre-factor and are set using ``Maximum thermal
# prefactor'' and ``Minimum thermal prefactor''. Specifying a value of 0.0
# for the minimum or maximum values will disable pre-factor limiting.
#
# The compositional pre-factor for the viscosity is defined as \begin{align}
# \zeta(\mathfrak c) &= \xi^{c_0}\end{align} if the model has compositional
# fields and equals one otherwise. $\xi$ corresponds to the parameter
# ``Composition viscosity prefactor'' in the input file.
#
# Finally, in the formula for the density, $\alpha$ corresponds to the
# ``Thermal expansion coefficient'' and $\Delta\rho$ corresponds to the
# parameter ``Density differential for compositional field 1''.
#
# Note that this model uses the formulation that assumes an incompressible
# medium despite the fact that the density follows the law
# $\rho(T)=\rho_0(1-\alpha(T-T_{\text{ref}}))$.
#
# \note{Despite its name, this material model is not exactly ``simple'', as
# indicated by the formulas above. While it was originally intended to be
# simple, it has over time acquired all sorts of temperature and
# compositional dependencies that weren't initially intended. Consequently,
# there is now a ``simpler'' material model that fills the role the
# current model was originally intended to fill.}
#
# `simple compressible': A material model that has constant values for all
# coefficients but the density. The defaults for all coefficients are chosen
# to be similar to what is believed to be correct for Earth's mantle. All of
# the values that define this model are read from a section ``Material
# model/Simple compressible model'' in the input file, see
# Section~\ref{parameters:Material_20model/Simple_20compressible_20model}.
#
# This model uses the following equations for the density: \begin{align}
# \rho(p,T) = \rho_0 \left(1-\alpha (T-T_a)\right)
# \exp\left(\beta (P-P_0)\right)\end{align} This formulation for the density
# assumes that the compressibility provided by the user is the adiabatic
# compressibility ($\beta_S$). The thermal expansivity and isentropic
# compressibility implied by the pressure and temperature dependence are
# equal to the user-defined constant values only along the reference
# isentrope, and there is also an implicit pressure dependence to the heat
# capacity $C_p$ via Maxwell's relations.
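#
# As a purely illustrative evaluation of the density law above (the values
# are arbitrary and are not the defaults of this model): with $\rho_0 =
# 3300\, kg/m^3$, $\alpha = 2\times 10^{-5}\, 1/K$, $\beta = 4\times
# 10^{-12}\, 1/Pa$, $T - T_a = 100\, K$ and $P - P_0 = 10^9\, Pa$, one
# obtains $\rho \approx 3300 \cdot (1 - 2\times 10^{-3}) \cdot e^{4\times
# 10^{-3}} \approx 3307\, kg/m^3$, i.e. thermal expansion slightly decreases
# and compression slightly increases the reference density.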
#
# `simpler': A material model that has constant values except for density,
# which depends linearly on temperature: \begin{align} \rho(p,T) &=
# \left(1-\alpha (T-T_0)\right)\rho_0.\end{align}
#
# \note{This material model fills the role the ``simple'' material model was
# originally intended to fill, before the latter acquired all sorts of
# complicated temperature and compositional dependencies.}
#
# `visco plastic': An implementation of an incompressible
# visco(elastic)-plastic rheology with options for selecting dislocation
# creep, diffusion creep or composite viscous flow laws. Prior to yielding,
# one may select to modify the viscosity to account for viscoelastic
# effects. Plasticity limits viscous stresses through a Drucker Prager yield
# criterion. Note that this material model is based heavily on the
# DiffusionDislocation (Bob Myhill), DruckerPrager (Anne Glerum), and
# Viscoelastic (John Naliboff) material models.
#
# The viscosity for dislocation or diffusion creep is defined as \[v = \frac
# 12 A^{-\frac{1}{n}} d^{\frac{m}{n}} \dot{\varepsilon}_{ii}^{\frac{1-n}{n}}
# \exp\left(\frac{E + PV}{nRT}\right)\] where $A$ is the prefactor, $n$ is
# the stress exponent, $\dot{\varepsilon}_{ii}$ is the square root of the
# deviatoric strain rate tensor second invariant, $d$ is grain size, $m$ is
# the grain size exponent, $E$ is activation energy, $V$ is activation
# volume, $P$ is pressure, $R$ is the gas constant and $T$ is temperature.
# This form of the viscosity equation is commonly used in geodynamic
# simulations. See, for example, Billen and Hirth (2007), G3, 8, Q08012.
# Significantly, other studies may use slightly different forms of the
# viscosity equation leading to variations in how specific terms are defined
# or combined. For example, the grain size exponent should always be
# positive in the diffusion viscosity equation used here, while other
# studies place the grain size term in the denominator and invert the sign
# of the grain size exponent. When examining previous work, one should
# carefully check how the viscous prefactor and grain size terms are
# defined.
#
# One may select to use the diffusion ($v_{\text{diff}}$; $n=1$, $m\neq 0$),
# dislocation ($v_{\text{disl}}$, $n>1$, $m=0$) or composite
# $\frac{v_{\text{diff}} v_{\text{disl}}}{v_{\text{diff}}+v_{\text{disl}}}$
# equation form.
#
# The diffusion and dislocation prefactors can be weakened with a factor
# between 0 and 1 according to the total or the viscous strain only.
#
# Viscosity is limited through one of two different `yielding' mechanisms.
#
# The first plasticity mechanism limits viscous stress through a Drucker
# Prager yield criterion, where the yield stress in 3D is $\sigma_y =
# \frac{6C\cos(\phi) + 2P\sin(\phi)} {\sqrt{3}(3+\sin(\phi))}$ and $\sigma_y
# = C\cos(\phi) + P\sin(\phi)$ in 2D. Above, $C$ is cohesion and $\phi$ is
# the angle of internal friction. Note that the 2D form is equivalent to
# the Mohr Coulomb yield surface. If $\phi$ is 0, the yield stress is fixed
# and equal to the cohesion (von Mises yield criterion). When the viscous
# stress ($2v{\varepsilon}_{ii}$) exceeds the yield stress, the viscosity is
# rescaled back to the yield surface:
# $v_{y}=\sigma_{y}/(2{\varepsilon}_{ii})$. This form of plasticity is
# commonly used in geodynamic models. See, for example, Thieulot, C. (2011),
# PEPI 188, pp. 47-68.
#
# The user has the option to linearly reduce the cohesion and internal
# friction angle as a function of the finite strain magnitude. The finite
# strain invariant or full strain tensor is calculated through compositional
# fields within the material model. This implementation is identical to the
# compositional field finite strain plugin and cookbook described in the
# manual (authors: Gassmoeller, Dannberg). If the user selects to track the
# finite strain invariant ($e_{ii}$), a single compositional field tracks
# the value derived from $e_{ii}^t = (e_{ii})^{(t-1)} + \dot{e}_{ii}\; dt$,
# where $t$ and $t-1$ are the current and prior time steps, $\dot{e}_{ii}$
# is the second invariant of the strain rate tensor and $dt$ is the time
# step size. In the case of the full strain tensor $F$, the finite strain
# magnitude is derived from the second invariant of the symmetric stretching
# tensor $L$, where $L = F [F]^T$. The user must specify a single
# compositional field for the finite strain invariant or multiple fields (4
# in 2D, 9 in 3D) for the finite strain tensor. These field(s) must be the
# first listed compositional fields in the parameter file. Note that one or
# more of the finite strain tensor components must be assigned a non-zero
# value initially. This value can be quite small (e.g., 1.e-8), but still
# non-zero. While the option to track and use the full finite strain tensor
# exists, tracking the associated compositional fields is computationally
# expensive in 3D. Similarly, the finite strain magnitudes may in fact
# decrease if the orientation of the deformation field switches through
# time. Consequently, the ideal solution is to track the finite strain
# invariant (a single compositional field) within the material model and
# track the full finite strain tensor through particles. When only the
# second invariant of the strain is tracked, one has the option to track the
# full strain or only the plastic strain. In the latter case, strain is only
# tracked in case the material is plastically yielding, i.e. the viscous
# stress > yield stress.
#
# Viscous stress may also be limited by a non-linear stress limiter that has
# a form similar to the Peierls creep mechanism. This stress limiter assigns
# an effective viscosity $\sigma_{\text{eff}} =
# \frac{\tau_y}{2\varepsilon_y}
# {\frac{\varepsilon_{ii}}{\varepsilon_y}}^{\frac{1}{n_y}-1}$. Above, $\tau_y$
# is a yield stress, $\varepsilon_y$ is the reference strain rate,
# $\varepsilon_{ii}$ is the strain rate and $n_y$ is the stress limiter
# exponent. The yield stress, $\tau_y$, is defined through the Drucker
# Prager yield criterion formulation. This method of limiting viscous stress
# has been used in various forms within the geodynamic literature
# \cite{chri92,vavv02,cibi13,cibi15}. When $n_y$ is 1, it essentially becomes
# a linear viscosity model, and in the limit $n_y\rightarrow \infty$ it
# converges to the standard viscosity rescaling method (concretely, values
# $n_y>20$ are large enough).
#
# The visco-plastic rheology described above may also be modified to include
# viscoelastic deformation, thus producing a viscoelastic plastic
# constitutive relationship.
#
# The viscoelastic rheology behavior takes into account the elastic shear
# strength (e.g., shear modulus), while the tensile and volumetric strength
# (e.g., Young's and bulk modulus) are not considered. The model is
# incompressible and allows specifying an arbitrary number of compositional
# fields, where each field represents a different rock type or component of
# the viscoelastic stress tensor. The stress tensor in 2D and 3D,
# respectively, contains 3 or 6 components. The compositional fields
# representing these components must be named and listed in a very specific
# format, which is designed to minimize mislabeling stress tensor components
# as distinct 'compositional rock types' (or vice versa). For 2D models, the
# first three compositional fields must be labeled 'stress\_xx',
# 'stress\_yy' and 'stress\_xy'. In 3D, the first six compositional fields
# must be labeled 'stress\_xx', 'stress\_yy', 'stress\_zz', 'stress\_xy',
# 'stress\_xz', 'stress\_yz'.
#
# Combining this viscoelasticity implementation with non-linear viscous flow
# and plasticity produces a constitutive relationship commonly referred to
# as partial elastoviscoplastic (e.g., pEVP) in the geodynamics community.
# While extensively discussed and applied within the geodynamics literature,
# notable references include: Moresi et al. (2003), J. Comp. Phys., v. 184,
# p. 476-497. Gerya and Yuen (2007), Phys. Earth. Planet. Inter., v. 163, p.
# 83-105. Gerya (2010), Introduction to Numerical Geodynamic Modeling. Kaus
# (2010), Tectonophysics, v. 484, p. 36-47. Choi et al. (2013), J. Geophys.
# Res., v. 118, p. 2429-2444. Keller et al. (2013), Geophys. J. Int., v.
# 195, p. 1406-1442.
#
# The overview below directly follows Moresi et al. (2003) eqns. 23-38.
# However, an important distinction between this material model and the
# studies above is the use of compositional fields, rather than tracers, to
# track individual components of the viscoelastic stress tensor. The
# material model will be updated when an option to track and calculate
# viscoelastic stresses with tracers is implemented.
#
# Moresi et al. (2003) begins (eqn. 23) by writing the deviatoric rate of
# deformation ($\hat{D}$) as the sum of elastic ($\hat{D_{e}}$) and viscous
# ($\hat{D_{v}}$) components: $\hat{D} = \hat{D_{e}} + \hat{D_{v}}$. These
# terms further decompose into $\hat{D_{v}} = \frac{\tau}{2\eta}$ and
# $\hat{D_{e}} = \frac{\overset{\triangledown}{\tau}}{2\mu}$, where $\tau$
# is the viscous deviatoric stress, $\eta$ is the shear viscosity, $\mu$ is
# the shear modulus and $\overset{\triangledown}{\tau}$ is the Jaumann
# corotational stress rate. If plasticity is included, the deviatoric rate of
# deformation may be written as: $\hat{D} = \hat{D_{e}} + \hat{D_{v}} +
# \hat{D_{p}}$, where $\hat{D_{p}}$ is the plastic component. As defined in
# the second paragraph, $\hat{D_{p}}$ decomposes to
# $\frac{\tau_{y}}{2\eta_{y}}$, where $\tau_{y}$ is the yield stress and
# $\eta_{y}$ is the viscosity rescaled to the yield surface.
#
# Above, the Jaumann corotational stress rate (eqn. 24) from the elastic
# component contains the time derivative of the deviatoric stress
# ($\dot{\tau}$) and terms that account for material spin (e.g., rotation)
# due to advection: $\overset{\triangledown}{\tau} = \dot{\tau} + {\tau}W
# -W\tau$. Above, $W$ is the material spin tensor (eqn. 25): $W_{ij} =
# \frac{1}{2} \left (\frac{\partial V_{i}}{\partial x_{j}} - \frac{\partial
# V_{j}}{\partial x_{i}} \right )$.
#
# The Jaumann stress-rate can also be approximated using terms from the time
# at the previous time step ($t$) and current time step ($t + \Delta
# t^{e}$): $\smash[t]{\overset{\triangledown}{\tau}}^{t + \Delta t^{e}}
# \approx \frac{\tau^{t + \Delta t^{e}} - \tau^{t}}{\Delta t^{e}} -
# W^{t}\tau^{t} + \tau^{t}W^{t}$. In this material model, the size of the
# time step above ($\Delta t^{e}$) can be specified as the numerical time
# step size or an independent fixed time step. If the latter case is
# selected, the user has an option to apply a stress averaging scheme to
# account for the differences between the numerical and fixed elastic time
# step (eqn. 32). If one selects to use a fixed elastic time step throughout
# the model run, this can still be achieved by using CFL and maximum time
# step values that restrict the numerical time step to a specific time.
#
# The formulation above allows rewriting the total rate of deformation (eqn.
# 29) as $\tau^{t + \Delta t^{e}} = \eta_{eff} \left ( 2\hat{D}^{t +
# \triangle t^{e}} + \frac{\tau^{t}}{\mu \Delta t^{e}} + \frac{W^{t}\tau^{t}
# - \tau^{t}W^{t}}{\mu} \right )$.
#
# The effective viscosity (eqn. 28) is a function of the viscosity ($\eta$),
# elastic time step size ($\Delta t^{e}$) and shear relaxation time ($
# \alpha = \frac{\eta}{\mu} $): $\eta_{eff} = \eta \frac{\Delta
# t^{e}}{\Delta t^{e} + \alpha}$. The magnitude of the shear modulus thus
# controls how much the effective viscosity is reduced relative to the
# initial viscosity.
#
# Elastic effects are introduced into the governing Stokes equations through
# an elastic force term (eqn. 30) using stresses from the previous time
# step: $F^{e,t} = -\frac{\eta_{eff}}{\mu \Delta t^{e}} \tau^{t}$. This
# force term is added onto the right-hand side force vector in the system of
# equations.
#
# When plastic yielding occurs, the effective viscosity in equations 29 and
# 30 is the plastic viscosity (equation 35). If the current stress is below
# the plastic yield stress, the effective viscosity is still as defined in
# equation 28. During non-linear iterations, we define the current stress
# prior to yielding (e.g., value compared to yield stress) as $\tau^{t +
# \Delta t^{e}} = \eta_{eff} \left ( 2\hat{D}^{t + \triangle t^{e}} +
# \frac{\tau^{t}}{\mu \Delta t^{e}} \right )$.
#
# Compositional fields can each be assigned individual values of thermal
# diffusivity, heat capacity, density, thermal expansivity and rheological
# parameters.
#
# If more than one compositional field is present at a given point,
# viscosities are averaged with an arithmetic, geometric, harmonic (default)
# or maximum composition scheme.
#
# The values for the components of this formula and additional parameters
# are read from the parameter file in subsection 'Material model/Visco
# Plastic'.
#
# `viscoelastic': An implementation of a simple linear viscoelastic rheology
# that only includes the deviatoric components of elasticity. Specifically,
# the viscoelastic rheology only takes into account the elastic shear
# strength (e.g., shear modulus), while the tensile and volumetric strength
# (e.g., Young's and bulk modulus) are not considered. The model is
# incompressible and allows specifying an arbitrary number of compositional
# fields, where each field represents a different rock type or component of
# the viscoelastic stress tensor. The stress tensor in 2D and 3D,
# respectively, contains 3 or 6 components. The compositional fields
# representing these components must be named and listed in a very specific
# format, which is designed to minimize mislabeling stress tensor components
# as distinct 'compositional rock types' (or vice versa). For 2D models, the
# first three compositional fields must be labeled 'stress\_xx',
# 'stress\_yy' and 'stress\_xy'. In 3D, the first six compositional fields
# must be labeled 'stress\_xx', 'stress\_yy', 'stress\_zz', 'stress\_xy',
# 'stress\_xz', 'stress\_yz'.
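#
# As a minimal sketch of the field naming convention just described, for a
# 2D model (the ``Compositional fields'' subsection and its parameter names
# are assumed here for illustration and are not part of this listing; any
# additional rock-type fields would simply be listed after the stress
# components):
#
#   subsection Compositional fields
#     set Number of fields = 3
#     set Names of fields  = stress_xx, stress_yy, stress_xy
#   end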
#
# Expanding the model to include non-linear viscous flow (e.g.,
# diffusion/dislocation creep) and plasticity would produce a constitutive
# relationship commonly referred to as partial elastoviscoplastic (e.g.,
# pEVP) in the geodynamics community. While extensively discussed and
# applied within the geodynamics literature, notable references include:
# Moresi et al. (2003), J. Comp. Phys., v. 184, p. 476-497. Gerya and Yuen
# (2007), Phys. Earth. Planet. Inter., v. 163, p. 83-105. Gerya (2010),
# Introduction to Numerical Geodynamic Modeling. Kaus (2010),
# Tectonophysics, v. 484, p. 36-47. Choi et al. (2013), J. Geophys. Res., v.
# 118, p. 2429-2444. Keller et al. (2013), Geophys. J. Int., v. 195, p.
# 1406-1442.
#
# The overview below directly follows Moresi et al. (2003) eqns. 23-32.
# However, an important distinction between this material model and the
# studies above is the use of compositional fields, rather than tracers, to
# track individual components of the viscoelastic stress tensor. The
# material model will be updated when an option to track and calculate
# viscoelastic stresses with tracers is implemented.
#
# Moresi et al. (2003) begins (eqn. 23) by writing the deviatoric rate of
# deformation ($\hat{D}$) as the sum of elastic ($\hat{D_{e}}$) and viscous
# ($\hat{D_{v}}$) components: $\hat{D} = \hat{D_{e}} + \hat{D_{v}}$. These
# terms further decompose into $\hat{D_{v}} = \frac{\tau}{2\eta}$ and
# $\hat{D_{e}} = \frac{\overset{\nabla}{\tau}}{2\mu}$, where $\tau$ is the
# viscous deviatoric stress, $\eta$ is the shear viscosity, $\mu$ is the
# shear modulus and $\overset{\nabla}{\tau}$ is the Jaumann corotational
# stress rate. This latter term (eqn. 24) contains the time derivative of the
# deviatoric stress ($\dot{\tau}$) and terms that account for material spin
# (e.g., rotation) due to advection: $\overset{\nabla}{\tau} = \dot{\tau} +
# {\tau}W -W\tau$. Above, $W$ is the material spin tensor (eqn. 25): $W_{ij}
# = \frac{1}{2} \left (\frac{\partial V_{i}}{\partial x_{j}} -
# \frac{\partial V_{j}}{\partial x_{i}} \right )$.
#
# The Jaumann stress-rate can also be approximated using terms from the time
# at the previous time step ($t$) and current time step ($t + \Delta
# t^{e}$): $\smash[t]{\overset{\nabla}{\tau}}^{t + \Delta t^{e}} \approx
# \frac{\tau^{t + \Delta t^{e}} - \tau^{t}}{\Delta t^{e}} - W^{t}\tau^{t} +
# \tau^{t}W^{t}$. In this material model, the size of the time step above
# ($\Delta t^{e}$) can be specified as the numerical time step size or an
# independent fixed time step. If the latter case is selected, the user
# has an option to apply a stress averaging scheme to account for the
# differences between the numerical and fixed elastic time step (eqn. 32).
# If one selects to use a fixed elastic time step throughout the model run,
# this can still be achieved by using CFL and maximum time step values that
# restrict the numerical time step to a specific time.
#
# The formulation above allows rewriting the total rate of deformation (eqn.
# 29) as $\tau^{t + \Delta t^{e}} = \eta_{eff} \left ( 2\hat{D}^{t +
# \triangle t^{e}} + \frac{\tau^{t}}{\mu \Delta t^{e}} + \frac{W^{t}\tau^{t}
# - \tau^{t}W^{t}}{\mu} \right )$.
#
# The effective viscosity (eqn.
28) is a function of the viscosity ($\eta$), # elastic time step size ($\Delta t^{e}$) and shear relaxation time ($ # \alpha = \frac{\eta}{\mu} $): $\eta_{eff} = \eta \frac{\Delta # t^{e}}{\Delta t^{e} + \alpha}$ The magnitude of the shear modulus thus # controls how much the effective viscosity is reduced relative to the # initial viscosity. # # Elastic effects are introduced into the governing Stokes equations through # an elastic force term (eqn. 30) using stresses from the previous time # step: $F^{e,t} = -\frac{\eta_{eff}}{\mu \Delta t^{e}} \tau^{t}$. This # force term is added onto the right-hand side force vector in the system of # equations. # # The value of each compositional field representing distinct rock types at # a point is interpreted to be a volume fraction of that rock type. If the # sum of the compositional field volume fractions is less than one, then the # remainder of the volume is assumed to be 'background material'. # # Several model parameters (densities, elastic shear moduli, thermal # expansivities, thermal conductivies, specific heats) can be defined # per-compositional field. For each material parameter the user supplies a # comma delimited list of length N+1, where N is the number of compositional # fields. The additional field corresponds to the value for background # material. They should be ordered ''background, composition1, # composition2...''. However, the first 3 (2D) or 6 (3D) composition fields # correspond to components of the elastic stress tensor and their material # values will not contribute to the volume fractions. If a single value is # given, then all the compositional fields are given that value. Other # lengths of lists are not allowed. For a given compositional field the # material parameters are treated as constant, except density, which varies # linearly with temperature according to the thermal expansivity. # # When more than one compositional field is present at a point, they are # averaged arithmetically. An exception is viscosity, which may be averaged # arithmetically, harmonically, geometrically, or by selecting the viscosity # of the composition field with the greatest volume fraction. set Model name = simple # default: unspecified subsection Ascii reference profile # Reference conductivity set Thermal conductivity = 4.0 # The temperature dependence of viscosity. Dimensionless exponent. set Thermal viscosity exponent = 0. # A list of depths where the viscosity changes. Values must monotonically # increase. Units: $m$. set Transition depths = 1.5e5, 4.1e5, 6.6e5 # Whether to use the TALA instead of the ALA approximation. set Use TALA = false # Viscosity set Viscosity = 1e21 # A list of prefactors for the viscosity that determine the viscosity # profile. Each prefactor is applied in a depth range specified by the # list of `Transition depths', i.e. the first prefactor is applied above # the first transition depth, the second one between the first and second # transition depth, and so on. To compute the viscosity profile, this # prefactor is multiplied by the reference viscosity specified through the # parameter `Viscosity'. List must have one more entry than Transition # depths. Units: non-dimensional. set Viscosity prefactors = 10., 0.1, 1., 10. subsection Ascii data model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. 
The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which # the ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/adiabatic-conditions/ascii-data/ # The file name of the model data. Provide file in format: (Velocity # file name).\%s\%d where \%s is a string specifying the boundary of the # model according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = # Scalar factor, which is applied to the model data. You might want to # use this to scale the input to a reference model. Another way to use # this factor is to convert units of the input files. For instance, if # you provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end end subsection Averaging # Choose the averaging operation to use. set Averaging operation = none # The name of a material model that will be modified by an averaging # operation. Valid values for this parameter are the names of models that # are also valid for the ``Material models/Model name'' parameter. See the # documentation for that for more information. set Base model = simple # The limit normalized distance between 0 and 1 where the bell shape # becomes zero. See the manual for a more information. set Bell shape limit = 1. end subsection Compositing # Material model to use for Compressibility. Valid values for this # parameter are the names of models that are also valid for the ``Material # models/Model name'' parameter. See the documentation for that for more # information. set Compressibility = unspecified # Material model to use for Density. Valid values for this parameter are # the names of models that are also valid for the ``Material models/Model # name'' parameter. See the documentation for that for more information. set Density = unspecified # Material model to use for Entropy derivative pressure. Valid values for # this parameter are the names of models that are also valid for the # ``Material models/Model name'' parameter. See the documentation for that # for more information. set Entropy derivative pressure = unspecified # Material model to use for Entropy derivative temperature. Valid values # for this parameter are the names of models that are also valid for the # ``Material models/Model name'' parameter. See the documentation for that # for more information. set Entropy derivative temperature = unspecified # Material model to use for Reaction terms. Valid values for this # parameter are the names of models that are also valid for the ``Material # models/Model name'' parameter. See the documentation for that for more # information. set Reaction terms = unspecified # Material model to use for Specific heat. Valid values for this parameter # are the names of models that are also valid for the ``Material # models/Model name'' parameter. See the documentation for that for more # information. set Specific heat = unspecified # Material model to use for Thermal conductivity. Valid values for this # parameter are the names of models that are also valid for the ``Material # models/Model name'' parameter. See the documentation for that for more # information. set Thermal conductivity = unspecified # Material model to use for Thermal expansion coefficient. 
Valid values # for this parameter are the names of models that are also valid for the # ``Material models/Model name'' parameter. See the documentation for that # for more information. set Thermal expansion coefficient = unspecified # Material model to use for Viscosity. Valid values for this parameter are # the names of models that are also valid for the ``Material models/Model # name'' parameter. See the documentation for that for more information. set Viscosity = unspecified end subsection Composition reaction model # A linear dependency of viscosity on the first compositional field. # Dimensionless prefactor. With a value of 1.0 (the default) the viscosity # does not depend on the composition. set Composition viscosity prefactor 1 = 1.0 # A linear dependency of viscosity on the second compositional field. # Dimensionless prefactor. With a value of 1.0 (the default) the viscosity # does not depend on the composition. set Composition viscosity prefactor 2 = 1.0 # If compositional fields are used, then one would frequently want to make # the density depend on these fields. In this simple material model, we # make the following assumptions: if no compositional fields are used in # the current simulation, then the density is simply the usual one with # its linear dependence on the temperature. If there are compositional # fields, then the material model determines how many of them influence # the density. The composition-dependence adds a term of the kind $+\Delta # \rho \; c_1(\mathbf x)$. This parameter describes the value of $\Delta # \rho$. Units: $kg/m^3/\textrm{unit change in composition}$. set Density differential for compositional field 1 = 0. # If compositional fields are used, then one would frequently want to make # the density depend on these fields. In this simple material model, we # make the following assumptions: if no compositional fields are used in # the current simulation, then the density is simply the usual one with # its linear dependence on the temperature. If there are compositional # fields, then the material model determines how many of them influence # the density. The composition-dependence adds a term of the kind $+\Delta # \rho \; c_2(\mathbf x)$. This parameter describes the value of $\Delta # \rho$. Units: $kg/m^3/\textrm{unit change in composition}$. set Density differential for compositional field 2 = 0. # Above this depth the compositional fields react: The first field gets # converted to the second field. Units: $m$. set Reaction depth = 0. # Reference density $\rho_0$. Units: $kg/m^3$. set Reference density = 3300 # The value of the specific heat $C_p$. Units: $J/kg/K$. set Reference specific heat = 1250 # The reference temperature $T_0$. Units: $\si{K}$. set Reference temperature = 293. # The value of the thermal conductivity $k$. Units: $W/m/K$. set Thermal conductivity = 4.7 # The value of the thermal expansion coefficient $\alpha$. Units: $1/K$. set Thermal expansion coefficient = 2e-5 # The temperature dependence of viscosity. Dimensionless exponent. set Thermal viscosity exponent = 0.0 # The value of the constant viscosity. Units: $kg/m/s$. set Viscosity = 5e24 end subsection Depth dependent model # The name of a material model that will be modified by a depth dependent # viscosity. Valid values for this parameter are the names of models that # are also valid for the ``Material models/Model name'' parameter. See the # documentation for that for more information. set Base model = simple # The path to the model data. 
The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = ./ # Method that is used to specify how the viscosity should vary with # depth. set Depth dependence method = None # A comma-separated list of depth values for use with the ``List'' ``Depth # dependence method''. The list must be provided in order of increasing # depth, and the last value must be greater than or equal to the maximal # depth of the model. The depth list is interpreted as a layered viscosity # structure and the depth values specify the maximum depths of each # layer. set Depth list = # The name of the file containing depth-dependent viscosity data. set Viscosity depth file = visc-depth.txt # A comma-separated list of viscosity values, corresponding to the depth # values provided in ``Depth list''. The number of viscosity values # specified here must be the same as the number of depths provided in # ``Depth list'' set Viscosity list = subsection Viscosity depth function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric # value everywhere the constant appears. These values can be defined # using this parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = set Function expression = 1.0e21 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' # (in 3d) for spatial coordinates and `t' for time. You can then use # these variable names in your function expression and they will be # replaced by the values of these variables at which the function is # currently evaluated. However, you can also choose a different set of # names for the independent variables at which to evaluate your function # expression. For example, if you work in spherical coordinates, you may # wish to set this input parameter to `r,phi,theta,t' and then use these # variable names in your function expression. set Variable names = x,t end end subsection Diffusion dislocation # List of activation energies, $E_a$, for background mantle and # compositional fields, for a total of N+1 values, where N is the number # of compositional fields. If only one value is given, then all use the # same value. Units: $J / mol$ set Activation energies for diffusion creep = 375e3 # List of activation energies, $E_a$, for background mantle and # compositional fields, for a total of N+1 values, where N is the number # of compositional fields. If only one value is given, then all use the # same value. Units: $J / mol$ set Activation energies for dislocation creep = 530e3 # List of activation volumes, $V_a$, for background mantle and # compositional fields, for a total of N+1 values, where N is the number # of compositional fields. If only one value is given, then all use the # same value. 
Units: $m^3 / mol$ set Activation volumes for diffusion creep = 6e-6 # List of activation volumes, $V_a$, for background mantle and # compositional fields, for a total of N+1 values, where N is the number # of compositional fields. If only one value is given, then all use the # same value. Units: $m^3 / mol$ set Activation volumes for dislocation creep = 1.4e-5 # List of densities, $\rho$, for background mantle and compositional # fields, for a total of N+1 values, where N is the number of # compositional fields. If only one value is given, then all use the same # value. Units: $kg / m^3$ set Densities = 3300. # Scaling coefficient for effective viscosity. set Effective viscosity coefficient = 1.0 # Units: $m$ set Grain size = 1e-3 # List of grain size exponents, $m_{\text{diffusion}}$, for background # mantle and compositional fields, for a total of N+1 values, where N is # the number of compositional fields. If only one value is given, then all # use the same value. Units: None set Grain size exponents for diffusion creep = 3. # The value of the specific heat $C_p$. Units: $J/kg/K$ set Heat capacity = 1.25e3 # Maximum number of iterations to find the correct diffusion/dislocation # strain rate ratio. set Maximum strain rate ratio iterations = 40 # Upper cutoff for effective viscosity. Units: $Pa \, s$ set Maximum viscosity = 1e28 # Stabilizes strain dependent viscosity. Units: $1 / s$ set Minimum strain rate = 1.4e-20 # Lower cutoff for effective viscosity. Units: $Pa \, s$ set Minimum viscosity = 1e17 # List of viscosity prefactors, $A$, for background mantle and # compositional fields, for a total of N+1 values, where N is the number # of compositional fields. If only one value is given, then all use the # same value. Units: $Pa^{-1} m^{m_{\text{diffusion}}} s^{-1}$ set Prefactors for diffusion creep = 1.5e-15 # List of viscosity prefactors, $A$, for background mantle and # compositional fields, for a total of N+1 values, where N is the number # of compositional fields. If only one value is given, then all use the # same value. Units: $Pa^{-n_{\text{dislocation}}} s^{-1}$ set Prefactors for dislocation creep = 1.1e-16 # For calculating density by thermal expansivity. Units: $\si{K}$ set Reference temperature = 293. # The reference viscosity that is used for pressure scaling. To understand # how pressure scaling works, take a look at \cite{KHB12}. In particular, # the value of this parameter would not affect the solution computed by # \aspect{} if we could do arithmetic exactly; however, computers do # arithmetic in finite precision, and consequently we need to scale # quantities in ways so that their magnitudes are roughly the same. As # explained in \cite{KHB12}, we scale the pressure during some # computations (never visible by users) by a factor that involves a # reference viscosity. This parameter describes this reference viscosity. # # For problems with a constant viscosity, you will generally want to # choose the reference viscosity equal to the actual viscosity. For # problems with a variable viscosity, the reference viscosity should be a # value that adequately represents the order of magnitude of the # viscosities that appear, such as an average value or the value one would # use to compute a Rayleigh number. # # Units: $Pa \, s$ set Reference viscosity = 1e22 # Tolerance for correct diffusion/dislocation strain rate ratio. 
set Strain rate residual tolerance = 1e-22
# List of stress exponents, $n_{\text{diffusion}}$, for background mantle
# and compositional fields, for a total of N+1 values, where N is the
# number of compositional fields. If only one value is given, then all use
# the same value. Units: None
set Stress exponents for diffusion creep = 1.
# List of stress exponents, $n_{\text{dislocation}}$, for background
# mantle and compositional fields, for a total of N+1 values, where N is
# the number of compositional fields. If only one value is given, then all
# use the same value. Units: None
set Stress exponents for dislocation creep = 3.5
# Units: $m^2/s$
set Thermal diffusivity = 0.8e-6
# List of thermal expansivities for background mantle and compositional
# fields, for a total of N+1 values, where N is the number of
# compositional fields. If only one value is given, then all use the same
# value. Units: $1 / K$
set Thermal expansivities = 3.5e-5
# When more than one compositional field is present at a point with
# different viscosities, we need to come up with an average viscosity at
# that point. Select a weighted harmonic, arithmetic, geometric, or
# maximum composition.
set Viscosity averaging scheme = harmonic
end
subsection Drucker Prager
# Reference density $\rho_0$. Units: $kg/m^3$.
set Reference density = 3300
# The value of the specific heat $C_p$. Units: $J/kg/K$.
set Reference specific heat = 1250
# The reference temperature $T_0$. The reference temperature is used in
# the density calculation. Units: $\si{K}$.
set Reference temperature = 293.
# The reference viscosity that is used for pressure scaling. To understand
# how pressure scaling works, take a look at \cite{KHB12}. In particular,
# the value of this parameter would not affect the solution computed by
# \aspect{} if we could do arithmetic exactly; however, computers do
# arithmetic in finite precision, and consequently we need to scale
# quantities in ways so that their magnitudes are roughly the same. As
# explained in \cite{KHB12}, we scale the pressure during some
# computations (never visible by users) by a factor that involves a
# reference viscosity. This parameter describes this reference viscosity.
#
# For problems with a constant viscosity, you will generally want to
# choose the reference viscosity equal to the actual viscosity. For
# problems with a variable viscosity, the reference viscosity should be a
# value that adequately represents the order of magnitude of the
# viscosities that appear, such as an average value or the value one would
# use to compute a Rayleigh number.
#
# Units: $Pa \, s$
set Reference viscosity = 1e22
# The value of the thermal conductivity $k$. Units: $W/m/K$.
set Thermal conductivity = 4.7
# The value of the thermal expansion coefficient $\alpha$. Units: $1/K$.
set Thermal expansion coefficient = 2e-5
subsection Viscosity
# The value of the angle of internal friction $\phi$. For a value of
# zero, in 2D the von Mises criterion is retrieved. Angles higher than
# 30 degrees are harder to solve numerically. Units: degrees.
set Angle of internal friction = 0.
# The value of the cohesion $C$. Units: $Pa$.
set Cohesion = 2e7
# The value of the maximum viscosity cutoff $\eta_{\text{max}}$. Units:
# $Pa\;s$.
set Maximum viscosity = 1e24
# The value of the minimum viscosity cutoff $\eta_{\text{min}}$. Units:
# $Pa\;s$.
set Minimum viscosity = 1e19
# The value of the initial strain rate prescribed during the first
# nonlinear iteration $\dot{\epsilon}_{\text{ref}}$. Units: $1/s$.
set Reference strain rate = 1e-15
end
end
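# As a purely illustrative note on the ``Drucker Prager'' viscosity
# parameters above: with the default cohesion $C = 2\times 10^7\, Pa$ and
# angle of internal friction $\phi = 0$, the 2D yield stress described
# earlier for this model reduces to $\sigma_y = P \sin(\phi) + C \cos(\phi)
# = C = 2\times 10^7\, Pa$, independent of pressure (the von Mises limit).
# A minimal input-file sketch that selects this model and sets these
# parameters might look as follows (the values simply repeat the documented
# defaults and are shown for illustration only):
#
#   subsection Material model
#     set Model name = drucker prager
#     subsection Drucker Prager
#       subsection Viscosity
#         set Angle of internal friction = 0.
#         set Cohesion = 2e7
#         set Minimum viscosity = 1e19
#         set Maximum viscosity = 1e24
#         set Reference strain rate = 1e-15
#       end
#     end
#   end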
set Reference strain rate = 1e-15 end end subsection Dynamic Friction # List of densities for background mantle and compositional fields,for a # total of N+M+1 values, where N is the number of compositional fields and # M is the number of phases. If only one value is given, then all use the # same value. Units: $kg / m^3$ set Densities = 3300. # List of specific heats $C_p$ for background mantle and compositional # fields,for a total of N+M+1 values, where N is the number of # compositional fields and M is the number of phases. If only one value is # given, then all use the same value. Units: $J /kg /K$ set Heat capacities = 1250. # The reference temperature $T_0$. Units: $\si{K}$. set Reference temperature = 293. # List of thermal conductivities for background mantle and compositional # fields,for a total of N+1 values, where N is the number of compositional # fields.If only one value is given, then all use the same value. Units: # $W/m/K$. set Thermal conductivities = 4.7 # List of thermal expansivities for background mantle and compositional # fields,for a total of N+M+1 values, where N is the number of # compositional fields and M is the number of phases. If only one value is # given, then all use the same value. Units: $1/K$ set Thermal expansivities = 0.000040 # When more than one compositional field is present at a point with # different viscosities, we need to come up with an average viscosity at # that point. Select a weighted harmonic, arithmetic, geometric, or # maximum composition. set Viscosity averaging scheme = harmonic subsection Viscosities # List of background viscosities for mantle and compositional fields,for # a total of N+1 values, where N is the number of compositional # fields.If only one value is given, then all use the same value. Units: # $Pa \, s $ set Background Viscosities = 1.e20 # List of coefficients of dynamic friction for background mantle and # compositional fields,for a total of N+1 values, where N is the number # of compositional fields.If only one value is given, then all use the # same value. Units: $dimensionless$ set Coefficients of dynamic friction = 0.4 # List of coefficients of static friction for background mantle and # compositional fields,for a total of N+1 values, where N is the number # of compositional fields.If only one value is given, then all use the # same value. Units: $dimensionless$ set Coefficients of static friction = 0.5 # List of cohesions for background mantle and compositional fields,for a # total of N+1 values, where N is the number of compositional fields.If # only one value is given, then all use the same value. Units: $Pa$ set Cohesions = 4.e6 # The value of the maximum viscosity cutoff $\eta_max$. Units: $Pa\;s$. set Maximum viscosity = 1e24 # The value of the minimum viscosity cutoff $\eta_min$. Units: $Pa\;s$. set Minimum viscosity = 1e19 # The value of the initial strain rate prescribed during the first # nonlinear iteration $\dot{\epsilon}_ref$. Units: $1/s$. set Reference strain rate = 1e-15 end end subsection Grain size model # This parameter determines whether to advect the logarithm of the grain # size or the grain size itself. The equation and the physics are the # same, but for problems with high grain size gradients it might be # preferable to advect the logarithm. set Advect logarithm of grain size = false # The average specific grain boundary energy $\gamma$. Units: $J/m^2$. 
set Average specific grain boundary energy = 1.0

# This parameter determines whether to use bilinear interpolation to
# compute material properties (slower but more accurate).
set Bilinear interpolation = true

# The path to the model data. The path may also include the special text
# '$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the
# ASPECT source files were located when ASPECT was compiled. This
# interpretation allows, for example, to reference files located in the
# 'data/' subdirectory of ASPECT.
set Data directory = $ASPECT_SOURCE_DIR/data/material-model/steinberger/

# The file names of the enthalpy derivatives data. List with as many
# components as active compositional fields (material data is assumed to
# be in order with the ordering of the fields).
set Derivatives file names =

# The activation energy for diffusion creep $E_{diff}$. Units: $J/mol$.
set Diffusion activation energy = 3.35e5

# The activation volume for diffusion creep $V_{diff}$. Units: $m^3/mol$.
set Diffusion activation volume = 4e-6

# The power-law exponent $n_{diff}$ for diffusion creep. Units: none.
set Diffusion creep exponent = 1.

# The diffusion creep grain size exponent $p_{diff}$ that determines the
# dependence of viscosity on grain size. Units: none.
set Diffusion creep grain size exponent = 3.

# The prefactor for the diffusion creep law $A_{diff}$. Units:
# $m^{p_{diff}} Pa^{-n_{diff}}/s$.
set Diffusion creep prefactor = 7.4e-15

# The activation energy for dislocation creep $E_{dis}$. Units: $J/mol$.
set Dislocation activation energy = 4.8e5

# The activation volume for dislocation creep $V_{dis}$. Units:
# $m^3/mol$.
set Dislocation activation volume = 1.1e-5

# The power-law exponent $n_{dis}$ for dislocation creep. Units: none.
set Dislocation creep exponent = 3.5

# The prefactor for the dislocation creep law $A_{dis}$. Units:
# $Pa^{-n_{dis}}/s$.
set Dislocation creep prefactor = 4.5e-15

# We need to perform an iteration inside the computation of the
# dislocation viscosity, because it depends on the dislocation strain
# rate, which depends on the dislocation viscosity itself. This number
# determines the maximum number of iterations that are performed.
set Dislocation viscosity iteration number = 100

# We need to perform an iteration inside the computation of the
# dislocation viscosity, because it depends on the dislocation strain
# rate, which depends on the dislocation viscosity itself. This number
# determines the termination accuracy, i.e. if the dislocation viscosity
# changes by less than this factor we terminate the iteration.
set Dislocation viscosity iteration threshold = 1e-3

# The geometric constant $c$ used in the paleowattmeter grain size
# reduction law. Units: none.
set Geometric constant = 3.

# The activation energy for grain growth $E_g$. Units: $J/mol$.
set Grain growth activation energy = 3.5e5

# The activation volume for grain growth $V_g$. Units: $m^3/mol$.
set Grain growth activation volume = 8e-6

# The exponent of the grain growth law $p_g$. This is an experimentally
# determined grain growth constant. Units: none.
set Grain growth exponent = 3.

# The prefactor for the Ostwald ripening grain growth law $G_0$. This is
# dependent on water content, which is assumed to be 50 H/$10^6$ Si for
# the default value. Units: $m^{p_g}/s$.
set Grain growth rate constant = 1.5e-5
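
# Illustration only (not a parameter of this subsection): the creep
# parameters above typically enter a flow-law viscosity of the general
# form
#   $\eta_{diff} \propto A_{diff}^{-1} \, d^{p_{diff}}
#     \exp\left(\frac{E_{diff} + P V_{diff}}{R T}\right)$,
# with an analogous expression (involving the stress exponent $n_{dis}$
# but no grain size dependence) for dislocation creep, and the two
# mechanisms combined harmonically into an effective viscosity. The exact
# expressions implemented by this material model are given in its
# documentation; the form above is only meant to show how the activation
# energies, activation volumes, grain size exponent and prefactors are
# used.
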
# A scaling factor for the grain size in the lower mantle. In models where
# the high grain size contrast between the upper and lower mantle causes
# numerical problems, the grain size in the lower mantle can be scaled to
# a larger value, simultaneously scaling the viscosity prefactors and
# grain growth parameters to keep the same physical behavior. Differences
# to the original formulation only occur when material with a smaller
# grain size than the recrystallization grain size crosses the upper-lower
# mantle boundary. The real grain size can be obtained by dividing the
# model grain size by this value. Units: none.
set Lower mantle grain size scaling = 1.0

# The material file format to be read in the property tables.
set Material file format = perplex

# The file names of the material data. List with as many components as
# active compositional fields (material data is assumed to be in order
# with the ordering of the fields).
set Material file names = pyr-ringwood88.txt

# The maximum number of substeps over the temperature-pressure range to
# calculate the averaged enthalpy gradient over a cell.
set Maximum latent heat substeps = 1

# The maximum specific heat that is allowed in the whole model domain.
# Units: J/kg/K.
set Maximum specific heat = 6000.

# The factor by which the viscosity at adiabatic temperature and at
# ambient temperature are allowed to differ (a value of x means that the
# viscosity can be x times higher or x times lower compared to the value
# at adiabatic temperature). This parameter is introduced to limit local
# viscosity contrasts, but still allow for a widely varying viscosity over
# the whole mantle range. Units: none.
set Maximum temperature dependence of viscosity = 100.

# The maximum thermal expansivity that is allowed in the whole model
# domain. Units: 1/K.
set Maximum thermal expansivity = 1e-3

# The maximum viscosity that is allowed in the whole model domain.
# Units: $Pa \, s$.
set Maximum viscosity = 1e26

# The minimum grain size that is used for the material model. This
# parameter is introduced to limit local viscosity contrasts, but still
# allows for a widely varying viscosity over the whole mantle range.
# Units: $\si{m}$.
set Minimum grain size = 1e-5

# The minimum specific heat that is allowed in the whole model domain.
# Units: J/kg/K.
set Minimum specific heat = 500.

# The minimum thermal expansivity that is allowed in the whole model
# domain. Units: 1/K.
set Minimum thermal expansivity = 1e-5

# The minimum viscosity that is allowed in the whole model domain.
# Units: $Pa \, s$.
set Minimum viscosity = 1e18

# A list of Clapeyron slopes for each phase transition. A positive
# Clapeyron slope indicates that the phase transition will occur at a
# greater depth if the temperature is higher than the one given in Phase
# transition temperatures, and at a smaller depth if the temperature is
# lower than the one given in Phase transition temperatures. For negative
# slopes it is the other way round. List must have the same number of
# entries as Phase transition depths. Units: $Pa/K$.
set Phase transition Clapeyron slopes =

# A list of depths where phase transitions occur. Values must
# monotonically increase. Units: $m$.
set Phase transition depths =
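
# Purely illustrative example (hypothetical values, not defaults of this
# model): two transitions at 410 km and 660 km depth could be prescribed
# as
#   set Phase transition depths           = 410000, 660000
#   set Phase transition temperatures     = 1600, 1900
#   set Phase transition Clapeyron slopes = 3e6, -2e6
#   set Phase transition widths           = 20000, 20000
# where all of these lists must contain the same number of entries.
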
# A list of temperatures where phase transitions occur. Higher or lower
# temperatures lead to the phase transition occurring at smaller or
# greater depths than given in Phase transition depths, depending on the
# Clapeyron slope given in Phase transition Clapeyron slopes. List must
# have the same number of entries as Phase transition depths. Units:
# $\si{K}$.
set Phase transition temperatures =

# A list of widths for each phase transition. This is only used to specify
# the region where the recrystallized grain size is assigned after
# material has crossed a phase transition and should accordingly be chosen
# similar to the maximum cell width expected at the phase transition. List
# must have the same number of entries as Phase transition depths. Units:
# $m$.
set Phase transition widths =

# This parameter ($\lambda$) gives an estimate of the strain necessary to
# achieve a new grain size.
set Reciprocal required strain = 10.

# The grain size $d_{ph}$ to which a phase will be reduced when crossing
# a phase transition. When set to zero, the grain size will not be
# reduced. Units: $\si{m}$.
set Recrystallized grain size =

# The value of the reference compressibility. Units: $1/Pa$.
set Reference compressibility = 4e-12

# The reference density $\rho_0$. Units: $kg/m^3$.
set Reference density = 3300

# The value of the specific heat $cp$. Units: $J/kg/K$.
set Reference specific heat = 1250.

# The reference temperature $T_0$. Units: $\si{K}$.
set Reference temperature = 293.

# The value of the thermal conductivity $k$. Units: $W/m/K$.
set Thermal conductivity = 4.7

# The value of the thermal expansion coefficient $\alpha$. Units: $1/K$.
set Thermal expansion coefficient = 2e-5

# This parameter determines whether to use the enthalpy to calculate the
# thermal expansivity and specific heat (if true) or use the thermal
# expansivity and specific heat values from the material properties table
# directly (if false).
set Use enthalpy for material properties = true

# A flag indicating whether the computation should use the paleowattmeter
# approach of Austin and Evans (2007) for grain size reduction in the
# dislocation creep regime (if true) or the paleopiezometer approach of
# Hall and Parmentier (2003) (if false).
set Use paleowattmeter = true

# This parameter determines whether to use the table properties also for
# density, thermal expansivity and specific heat. If false, the properties
# are generated as in the simple compressible plugin.
set Use table properties = false

# The value of the constant viscosity. Units: $kg/m/s$.
set Viscosity = 5e24

# The fraction $\chi$ of work done by dislocation creep to change the
# grain boundary area. Units: $J/m^2$.
set Work fraction for boundary area change = 0.1
end

subsection Latent heat

# A linear dependency of viscosity on composition. Dimensionless
# prefactor.
set Composition viscosity prefactor = 1.0

# The value of the compressibility $\kappa$. Units: $1/Pa$.
set Compressibility = 5.124e-12

# A list of phases, which correspond to the Phase transition density
# jumps. The density jumps occur only in the phase that is given by this
# phase value. 0 stands for the first compositional field, 1 for the
# second compositional field and -1 for none of them. List must have the
# same number of entries as Phase transition depths. Units: $Pa/K$.
set Corresponding phase for density jump =

# Whether to list phase transitions by depth or pressure. If this
# parameter is true, then the input file will use Phase transition depths
# and Phase transition widths to define the phase transition. If it is
# false, the parameter file will read in phase transition data from Phase
# transition pressures and Phase transition pressure widths.
set Define transition by depth instead of pressure = true

# If compositional fields are used, then one would frequently want to make
# the density depend on these fields.
In this simple material model, we # make the following assumptions: if no compositional fields are used in # the current simulation, then the density is simply the usual one with # its linear dependence on the temperature. If there are compositional # fields, then the density only depends on the first one in such a way # that the density has an additional term of the kind $+\Delta \rho \; # c_1(\mathbf x)$. This parameter describes the value of $\Delta \rho$. # Units: $kg/m^3/\textrm{unit change in composition}$. set Density differential for compositional field 1 = 0. # Limit for the maximum viscosity in the model. Units: Pa \, s. set Maximum viscosity = 1e24 # Limit for the minimum viscosity in the model. Units: Pa \, s. set Minimum viscosity = 1e19 # A list of Clapeyron slopes for each phase transition. A positive # Clapeyron slope indicates that the phase transition will occur in a # greater depth, if the temperature is higher than the one given in Phase # transition temperatures and in a smaller depth, if the temperature is # smaller than the one given in Phase transition temperatures. For # negative slopes the other way round. List must have the same number of # entries as Phase transition depths. Units: $Pa/K$. set Phase transition Clapeyron slopes = # A list of density jumps at each phase transition. A positive value means # that the density increases with depth. The corresponding entry in # Corresponding phase for density jump determines if the density jump # occurs in peridotite, eclogite or none of them.List must have the same # number of entries as Phase transition depths. Units: $kg/m^3$. set Phase transition density jumps = # A list of depths where phase transitions occur. Values must # monotonically increase. Units: $m$. set Phase transition depths = # A list of widths for each phase transition, in terms of pressure. The # phase functions are scaled with these values, leading to a jump between # phases for a value of zero and a gradual transition for larger values. # List must have the same number of entries as Phase transition pressures. # Define transition by depth instead of pressure must be set to false to # use this parameter. Units: $Pa$. set Phase transition pressure widths = # A list of pressures where phase transitions occur. Values must # monotonically increase. Define transition by depth instead of pressure # must be set to false to use this parameter. Units: $Pa$. set Phase transition pressures = # A list of temperatures where phase transitions occur. Higher or lower # temperatures lead to phase transition occurring in smaller or greater # depths than given in Phase transition depths, depending on the Clapeyron # slope given in Phase transition Clapeyron slopes. List must have the # same number of entries as Phase transition depths. Units: $\si{K}$. set Phase transition temperatures = # A list of widths for each phase transition, in terms of depth. The phase # functions are scaled with these values, leading to a jump between phases # for a value of zero and a gradual transition for larger values. List # must have the same number of entries as Phase transition depths. Units: # $m$. set Phase transition widths = # Reference density $\rho_0$. Units: $kg/m^3$. set Reference density = 3300. # The value of the specific heat $C_p$. Units: $J/kg/K$. set Reference specific heat = 1250. # The reference temperature $T_0$. Units: $\si{K}$. set Reference temperature = 293. # The value of the thermal conductivity $k$. Units: $W/m/K$. 
set Thermal conductivity = 2.38 # The value of the thermal expansion coefficient $\beta$. Units: $1/K$. set Thermal expansion coefficient = 4e-5 # The temperature dependence of viscosity. Dimensionless exponent. set Thermal viscosity exponent = 0.0 # The value of the constant viscosity. Units: $kg/m/s$. set Viscosity = 5e24 # A list of prefactors for the viscosity for each phase. The reference # viscosity will be multiplied by this factor to get the corresponding # viscosity for each phase. List must have one more entry than Phase # transition depths. Units: non-dimensional. set Viscosity prefactors = end subsection Latent heat melt # Constant parameter in the quadratic function that approximates the # solidus of peridotite. Units: $\degree C$. set A1 = 1085.7 # Prefactor of the linear pressure term in the quadratic function that # approximates the solidus of peridotite. Units: $\degree C/Pa$. set A2 = 1.329e-7 # Prefactor of the quadratic pressure term in the quadratic function that # approximates the solidus of peridotite. Units: $\degree C/(Pa^2)$. set A3 = -5.1e-18 # Constant parameter in the quadratic function that approximates the # lherzolite liquidus used for calculating the fraction of # peridotite-derived melt. Units: $\degree C$. set B1 = 1475.0 # Prefactor of the linear pressure term in the quadratic function that # approximates the lherzolite liquidus used for calculating the fraction # of peridotite-derived melt. Units: $\degree C/Pa$. set B2 = 8.0e-8 # Prefactor of the quadratic pressure term in the quadratic function that # approximates the lherzolite liquidus used for calculating the fraction # of peridotite-derived melt. Units: $\degree C/(Pa^2)$. set B3 = -3.2e-18 # Constant parameter in the quadratic function that approximates the # liquidus of peridotite. Units: $\degree C$. set C1 = 1780.0 # Prefactor of the linear pressure term in the quadratic function that # approximates the liquidus of peridotite. Units: $\degree C/Pa$. set C2 = 4.50e-8 # Prefactor of the quadratic pressure term in the quadratic function that # approximates the liquidus of peridotite. Units: $\degree C/(Pa^2)$. set C3 = -2.0e-18 # A linear dependency of viscosity on composition. Dimensionless # prefactor. set Composition viscosity prefactor = 1.0 # The value of the compressibility $\kappa$. Units: $1/Pa$. set Compressibility = 5.124e-12 # Constant parameter in the quadratic function that approximates the # solidus of pyroxenite. Units: $\degree C$. set D1 = 976.0 # Prefactor of the linear pressure term in the quadratic function that # approximates the solidus of pyroxenite. Note that this factor is # different from the value given in Sobolev, 2011, because they use the # potential temperature whereas we use the absolute temperature. Units: # $\degree C/Pa$. set D2 = 1.329e-7 # Prefactor of the quadratic pressure term in the quadratic function that # approximates the solidus of pyroxenite. Units: $\degree C/(Pa^2)$. set D3 = -5.1e-18 # If compositional fields are used, then one would frequently want to make # the density depend on these fields. In this simple material model, we # make the following assumptions: if no compositional fields are used in # the current simulation, then the density is simply the usual one with # its linear dependence on the temperature. If there are compositional # fields, then the density only depends on the first one in such a way # that the density has an additional term of the kind $+\Delta \rho \; # c_1(\mathbf x)$. 
# This parameter describes the value of $\Delta \rho$.
# Units: $kg/m^3/\textrm{unit change in composition}$.
set Density differential for compositional field 1 = 0.

# Prefactor of the linear depletion term in the quadratic function that
# approximates the melt fraction of pyroxenite. Units: $\degree C/Pa$.
set E1 = 663.8

# Prefactor of the quadratic depletion term in the quadratic function that
# approximates the melt fraction of pyroxenite. Units: $\degree C/(Pa^2)$.
set E2 = -611.4

# Mass fraction of clinopyroxene in the peridotite to be molten. Units:
# non-dimensional.
set Mass fraction cpx = 0.15

# Maximum melt fraction of pyroxenite in this parameterization. At higher
# temperatures peridotite begins to melt.
set Maximum pyroxenite melt fraction = 0.5429

# The entropy change for the phase transition from solid to melt of
# peridotite. Units: $J/(kg K)$.
set Peridotite melting entropy change = -300.

# The entropy change for the phase transition from solid to melt of
# pyroxenite. Units: $J/(kg K)$.
set Pyroxenite melting entropy change = -400.

# Reference density $\rho_0$. Units: $kg/m^3$.
set Reference density = 3300.

# The value of the specific heat $C_p$. Units: $J/kg/K$.
set Reference specific heat = 1250.

# The reference temperature $T_0$. Units: $\si{K}$.
set Reference temperature = 293.

# The relative density of melt compared to the solid material. This means
# the density change upon melting is this parameter times the density of
# the solid material. Units: non-dimensional.
set Relative density of melt = 0.9

# The value of the thermal conductivity $k$. Units: $W/m/K$.
set Thermal conductivity = 2.38

# The value of the thermal expansion coefficient $\alpha_s$. Units: $1/K$.
set Thermal expansion coefficient = 4e-5

# The value of the thermal expansion coefficient $\alpha_f$. Units: $1/K$.
set Thermal expansion coefficient of melt = 6.8e-5

# The temperature dependence of viscosity. Dimensionless exponent.
set Thermal viscosity exponent = 0.0

# The value of the constant viscosity. Units: $kg/m/s$.
set Viscosity = 5e24

# Exponent of the melting temperature in the melt fraction calculation.
# Units: non-dimensional.
set beta = 1.5

# Constant in the linear function that approximates the clinopyroxene
# reaction coefficient. Units: non-dimensional.
set r1 = 0.5

# Prefactor of the linear pressure term in the linear function that
# approximates the clinopyroxene reaction coefficient. Units: $1/Pa$.
set r2 = 8e-11
end

subsection Melt global

# The density contrast between material with a depletion of 1 and a
# depletion of zero. Negative values indicate lower densities of depleted
# material. Depletion is indicated by the compositional field with the
# name peridotite. Not used if this field does not exist in the model.
# Units: $kg/m^3$.
set Depletion density change = 0.0

# The solidus temperature change for a depletion of 100\%. For positive
# values, the solidus gets increased for a positive peridotite field
# (depletion) and lowered for a negative peridotite field (enrichment).
# Scaling with depletion is linear. Only active when fractional melting is
# used. Units: $\si{K}$.
set Depletion solidus change = 200.0
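
# For illustration only: because the scaling with depletion is linear,
# the value of 200 set above raises the solidus by 200 K where the
# peridotite (depletion) field equals 1, by 100 K where it equals 0.5,
# and lowers it by 100 K where the field equals -0.5 (enrichment).
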
# $\alpha_F$: exponential dependency of viscosity on the depletion field
# $F$ (called peridotite). Dimensionless factor. With a value of 0.0 (the
# default) the viscosity does not depend on the depletion. The effective
# viscosity increase due to depletion is defined as $\exp(\alpha_F \, F)$.
# Rationale: melting dehydrates the source rock by removing most of the
# volatiles, and makes it stronger. Hirth and Kohlstedt (1996) report
# typical values around a factor 100 to 1000 viscosity contrast between
# wet and dry rocks, although some experimental studies report a smaller
# (factor 10) contrast (e.g. Fei et al., 2013).
set Exponential depletion strengthening factor = 0.0

# The porosity dependence of the viscosity. Units: dimensionless.
set Exponential melt weakening factor = 27.

# Whether to include melting and freezing (according to a simplified
# linear melting approximation) in the model (if true), or not (if false).
set Include melting and freezing = true

# $\Delta \eta_{F,max}$: maximum depletion strengthening of viscosity.
# Rationale: melting dehydrates the source rock by removing most of the
# volatiles, and makes it stronger. Hirth and Kohlstedt (1996) report
# typical values around a factor 100 to 1000 viscosity contrast between
# wet and dry rocks, although some experimental studies report a smaller
# (factor 10) contrast (e.g. Fei et al., 2013).
set Maximum Depletion viscosity change = 1.0e3

# The value of the pressure derivative of the melt bulk modulus. Units:
# None.
set Melt bulk modulus derivative = 0.0

# The value of the compressibility of the melt. Units: $1/Pa$.
set Melt compressibility = 0.0

# In case the operator splitting scheme is used, the porosity field cannot
# be set to a new equilibrium melt fraction instantly, but the model has
# to provide a melting time scale instead. This time scale defines how
# fast melting happens, or more specifically, the parameter defines the
# time after which the deviation of the porosity from the equilibrium melt
# fraction will be reduced to a fraction of $1/e$. So if the melting time
# scale is small compared to the time step size, the reaction will be so
# fast that the porosity is very close to the equilibrium melt fraction
# after reactions are computed. Conversely, if the melting time scale is
# large compared to the time step size, almost no melting and freezing
# will occur.
#
# Also note that the melting time scale has to be larger than or equal to
# the reaction time step used in the operator splitting scheme, otherwise
# reactions cannot be computed. If the model does not use operator
# splitting, this parameter is not used. Units: yr or s, depending on the
# ``Use years in output instead of seconds'' parameter.
set Melting time scale for operator splitting = 1e3

# The linear solidus temperature change with pressure. For positive
# values, the solidus gets increased for positive pressures. Units:
# $1/Pa$.
set Pressure solidus change = 6e-8

# The value of the constant bulk viscosity $\xi_0$ of the solid matrix.
# This viscosity may be modified by both temperature and porosity
# dependencies. Units: $Pa \, s$.
set Reference bulk viscosity = 1e22

# Reference density of the melt/fluid $\rho_{f,0}$. Units: $kg/m^3$.
set Reference melt density = 2500.

# The value of the constant melt viscosity $\eta_f$. Units: $Pa \, s$.
set Reference melt viscosity = 10.

# Reference permeability of the solid host rock. Units: $m^2$.
set Reference permeability = 1e-8

# The value of the constant viscosity $\eta_0$ of the solid matrix. This
# viscosity may be modified by both temperature and porosity dependencies.
# Units: $Pa \, s$.
set Reference shear viscosity = 5e20

# Reference density of the solid $\rho_{s,0}$. Units: $kg/m^3$.
set Reference solid density = 3000.

# The value of the specific heat $C_p$. Units: $J/kg/K$.
set Reference specific heat = 1250. # The reference temperature $T_0$. The reference temperature is used in # both the density and viscosity formulas. Units: $\si{K}$. set Reference temperature = 293. # The value of the compressibility of the solid matrix. Units: $1/Pa$. set Solid compressibility = 0.0 # Solidus for a pressure of zero. Units: $\si{K}$. set Surface solidus = 1300. # The temperature dependence of the bulk viscosity. Dimensionless # exponent. See the general documentation of this model for a formula that # states the dependence of the viscosity on this factor, which is called # $\beta$ there. set Thermal bulk viscosity exponent = 0.0 # The value of the thermal conductivity $k$. Units: $W/m/K$. set Thermal conductivity = 4.7 # The value of the thermal expansion coefficient $\beta$. Units: $1/K$. set Thermal expansion coefficient = 2e-5 # The temperature dependence of the shear viscosity. Dimensionless # exponent. See the general documentation of this model for a formula that # states the dependence of the viscosity on this factor, which is called # $\beta$ there. set Thermal viscosity exponent = 0.0 end subsection Melt simple # Constant parameter in the quadratic function that approximates the # solidus of peridotite. Units: $\degree C$. set A1 = 1085.7 # Prefactor of the linear pressure term in the quadratic function that # approximates the solidus of peridotite. Units: $\degree C/Pa$. set A2 = 1.329e-7 # Prefactor of the quadratic pressure term in the quadratic function that # approximates the solidus of peridotite. Units: $\degree C/(Pa^2)$. set A3 = -5.1e-18 # Constant parameter in the quadratic function that approximates the # lherzolite liquidus used for calculating the fraction of # peridotite-derived melt. Units: $\degree C$. set B1 = 1475.0 # Prefactor of the linear pressure term in the quadratic function that # approximates the lherzolite liquidus used for calculating the fraction # of peridotite-derived melt. Units: $\degree C/Pa$. set B2 = 8.0e-8 # Prefactor of the quadratic pressure term in the quadratic function that # approximates the lherzolite liquidus used for calculating the fraction # of peridotite-derived melt. Units: $\degree C/(Pa^2)$. set B3 = -3.2e-18 # Constant parameter in the quadratic function that approximates the # liquidus of peridotite. Units: $\degree C$. set C1 = 1780.0 # Prefactor of the linear pressure term in the quadratic function that # approximates the liquidus of peridotite. Units: $\degree C/Pa$. set C2 = 4.50e-8 # Prefactor of the quadratic pressure term in the quadratic function that # approximates the liquidus of peridotite. Units: $\degree C/(Pa^2)$. set C3 = -2.0e-18 # The density contrast between material with a depletion of 1 and a # depletion of zero. Negative values indicate lower densities of depleted # material. Depletion is indicated by the compositional field with the # name peridotite. Not used if this field does not exist in the model. # Units: $kg/m^3$. set Depletion density change = 0.0 # The solidus temperature change for a depletion of 100\%. For positive # values, the solidus gets increased for a positive peridotite field # (depletion) and lowered for a negative peridotite field (enrichment). # Scaling with depletion is linear. Only active when fractional melting is # used. Units: $\si{K}$. set Depletion solidus change = 200.0 # The porosity dependence of the viscosity. Units: dimensionless. set Exponential melt weakening factor = 27. # Freezing rate of melt when in subsolidus regions. 
If this parameter is # set to a number larger than 0.0, it specifies the fraction of melt that # will freeze per year (or per second, depending on the ``Use years in # output instead of seconds'' parameter), as soon as the porosity exceeds # the equilibrium melt fraction, and the equilibrium melt fraction falls # below the depletion. In this case, melt will freeze according to the # given rate until one of those conditions is not fulfilled anymore. The # reasoning behind this is that there should not be more melt present than # the equilibrium melt fraction, as melt production decreases with # increasing depletion, but the freezing process of melt also reduces the # depletion by the same amount, and as soon as the depletion falls below # the equilibrium melt fraction, we expect that material should melt again # (no matter how much melt is present). This is quite a simplification and # not a realistic freezing parameterization, but without tracking the melt # composition, there is no way to compute freezing rates accurately. If # this parameter is set to zero, no freezing will occur. Note that # freezing can never be faster than determined by the ``Melting time scale # for operator splitting''. The product of the ``Freezing rate'' and the # ``Melting time scale for operator splitting'' defines how fast freezing # occurs with respect to melting (if the product is 0.5, melting will # occur twice as fast as freezing). Units: 1/yr or 1/s, depending on the # ``Use years in output instead of seconds'' parameter. set Freezing rate = 0.0 # Mass fraction of clinopyroxene in the peridotite to be molten. Units: # non-dimensional. set Mass fraction cpx = 0.15 # The value of the pressure derivative of the melt bulk modulus. Units: # None. set Melt bulk modulus derivative = 0.0 # The value of the compressibility of the melt. Units: $1/Pa$. set Melt compressibility = 0.0 # Depth above that melt will be extracted from the model, which is done by # a negative reaction term proportional to the porosity field. Units: # $m$. set Melt extraction depth = 1000.0 # Because the operator splitting scheme is used, the porosity field can # not be set to a new equilibrium melt fraction instantly, but the model # has to provide a melting time scale instead. This time scale defines how # fast melting happens, or more specifically, the parameter defines the # time after which the deviation of the porosity from the equilibrium melt # fraction will be reduced to a fraction of $1/e$. So if the melting time # scale is small compared to the time step size, the reaction will be so # fast that the porosity is very close to the equilibrium melt fraction # after reactions are computed. Conversely, if the melting time scale is # large compared to the time step size, almost no melting and freezing # will occur. # # Also note that the melting time scale has to be larger than or equal to # the reaction time step used in the operator splitting scheme, otherwise # reactions can not be computed. Units: yr or s, depending on the ``Use # years in output instead of seconds'' parameter. set Melting time scale for operator splitting = 1e3 # The entropy change for the phase transition from solid to melt of # peridotite. Units: $J/(kg K)$. set Peridotite melting entropy change = -300. # The value of the constant bulk viscosity $\xi_0$ of the solid matrix. # This viscosity may be modified by both temperature and porosity # dependencies. Units: $Pa \, s$. set Reference bulk viscosity = 1e22 # Reference density of the melt/fluid$\rho_{f,0}$. 
Units: $kg/m^3$. set Reference melt density = 2500. # The value of the constant melt viscosity $\eta_f$. Units: $Pa \, s$. set Reference melt viscosity = 10. # Reference permeability of the solid host rock.Units: $m^2$. set Reference permeability = 1e-8 # The value of the constant viscosity $\eta_0$ of the solid matrix. This # viscosity may be modified by both temperature and porosity dependencies. # Units: $Pa \, s$. set Reference shear viscosity = 5e20 # Reference density of the solid $\rho_{s,0}$. Units: $kg/m^3$. set Reference solid density = 3000. # The value of the specific heat $C_p$. Units: $J/kg/K$. set Reference specific heat = 1250. # The reference temperature $T_0$. The reference temperature is used in # both the density and viscosity formulas. Units: $\si{K}$. set Reference temperature = 293. # The value of the compressibility of the solid matrix. Units: $1/Pa$. set Solid compressibility = 0.0 # The temperature dependence of the bulk viscosity. Dimensionless # exponent. See the general documentation of this model for a formula that # states the dependence of the viscosity on this factor, which is called # $\beta$ there. set Thermal bulk viscosity exponent = 0.0 # The value of the thermal conductivity $k$. Units: $W/m/K$. set Thermal conductivity = 4.7 # The value of the thermal expansion coefficient $\beta$. Units: $1/K$. set Thermal expansion coefficient = 2e-5 # The temperature dependence of the shear viscosity. Dimensionless # exponent. See the general documentation of this model for a formula that # states the dependence of the viscosity on this factor, which is called # $\beta$ there. set Thermal viscosity exponent = 0.0 # If fractional melting should be used (if true), including a solidus # change based on depletion (in this case, the amount of melt that has # migrated away from its origin), and freezing of melt when it has moved # to a region with temperatures lower than the solidus; or if batch # melting should be used (if false), assuming that the melt fraction only # depends on temperature and pressure, and how much melt has already been # generated at a given point, but not considering movement of melt in the # melting parameterization. # # Note that melt does not freeze unless the 'Freezing rate' parameter is # set to a value larger than 0. set Use fractional melting = false # If the compressibility should be used everywhere in the code (if true), # changing the volume of material when the density changes, or only in the # momentum conservation and advection equations (if false). set Use full compressibility = false # Exponent of the melting temperature in the melt fraction calculation. # Units: non-dimensional. set beta = 1.5 # Constant in the linear function that approximates the clinopyroxene # reaction coefficient. Units: non-dimensional. set r1 = 0.5 # Prefactor of the linear pressure term in the linear function that # approximates the clinopyroxene reaction coefficient. Units: $1/Pa$. set r2 = 8e-11 end subsection Modified Tait model # The Einstein temperature at the reference pressure and temperature. # Units: $\text{K}$. set Einstein temperature = 600. # The value of the first pressure derivative of the isothermal bulk # modulus at the reference pressure and temperature. Units: []. set Reference bulk modulus derivative = 4. # The density at the reference pressure and temperature. Units: # $\text{kg}/\text{m}^3$. set Reference density = 3300. # The isothermal bulk modulus at the reference pressure and temperature. # Units: $\text{Pa}$. 
set Reference isothermal bulk modulus = 125e9 # Reference pressure $P_0$. Units: $\text{Pa}$. set Reference pressure = 1e5 # Reference temperature $T_0$. Units: $\text{K}$. set Reference temperature = 298.15 # The thermal expansion coefficient at the reference pressure and # temperature. Units: $1/\text{K}$. set Reference thermal expansivity = 2e-5 # The value of the constant thermal conductivity $k$. Units: # $\text{W}/\text{m}/\text{K}$. set Thermal conductivity = 4.7 # The value of the constant viscosity $\eta_0$. Units: $\text{Pas}$. set Viscosity = 1e21 subsection Reference heat capacity function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric # value everywhere the constant appears. These values can be defined # using this parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = set Function expression = 1.25e3 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' # (in 3d) for spatial coordinates and `t' for time. You can then use # these variable names in your function expression and they will be # replaced by the values of these variables at which the function is # currently evaluated. However, you can also choose a different set of # names for the independent variables at which to evaluate your function # expression. For example, if you work in spherical coordinates, you may # wish to set this input parameter to `r,phi,theta,t' and then use these # variable names in your function expression. set Variable names = x,t end end subsection Multicomponent # List of densities for background mantle and compositional fields,for a # total of N+M+1 values, where N is the number of compositional fields and # M is the number of phases. If only one value is given, then all use the # same value. Units: $kg / m^3$ set Densities = 3300. # List of specific heats $C_p$ for background mantle and compositional # fields,for a total of N+M+1 values, where N is the number of # compositional fields and M is the number of phases. If only one value is # given, then all use the same value. Units: $J /kg /K$ set Heat capacities = 1250. # The reference temperature $T_0$. Units: $\si{K}$. set Reference temperature = 293. # List of thermal conductivities for background mantle and compositional # fields,for a total of N+1 values, where N is the number of compositional # fields.If only one value is given, then all use the same value. Units: # $W/m/K$. set Thermal conductivities = 4.7 # List of thermal expansivities for background mantle and compositional # fields,for a total of N+M+1 values, where N is the number of # compositional fields and M is the number of phases. If only one value is # given, then all use the same value. Units: $1/K$ set Thermal expansivities = 0.000040 # List of viscosities for background mantle and compositional fields,for a # total of N+1 values, where N is the number of compositional fields.If # only one value is given, then all use the same value. 
Units: $Pa \, s$ set Viscosities = 1.e21 # When more than one compositional field is present at a point with # different viscosities, we need to come up with an average viscosity at # that point. Select a weighted harmonic, arithmetic, geometric, or # maximum composition. set Viscosity averaging scheme = harmonic end subsection Multicomponent compressible # List of isochoric specific heats $C_v$ for background mantle and # compositional fields,for a total of N+1 values, where N is the number of # compositional fields.If only one value is given, then all use the same # value. Units: $J /kg /K$ set Isochoric specific heats = 1250. # List of isothermal pressure derivatives of the bulk moduli for # background mantle and compositional fields,for a total of N+1 values, # where N is the number of compositional fields.If only one value is # given, then all use the same value. Units: []. set Isothermal bulk modulus pressure derivatives = 4. # List of densities for background mantle and compositional fields,for a # total of N+1 values, where N is the number of compositional fields.If # only one value is given, then all use the same value. Units: $kg / # m^3$ set Reference densities = 3300. # List of isothermal compressibilities for background mantle and # compositional fields,for a total of N+1 values, where N is the number of # compositional fields.If only one value is given, then all use the same # value. Units: $1/Pa$. set Reference isothermal compressibilities = 4e-12 # List of reference temperatures $T_0$ for background mantle and # compositional fields,for a total of N+1 values, where N is the number of # compositional fields.If only one value is given, then all use the same # value. Units: $K$. set Reference temperatures = 298.15 # List of thermal expansivities for background mantle and compositional # fields,for a total of N+1 values, where N is the number of compositional # fields.If only one value is given, then all use the same value. Units: # $1/K$ set Reference thermal expansivities = 4.e-5 # List of thermal conductivities for background mantle and compositional # fields,for a total of N+1 values, where N is the number of compositional # fields.If only one value is given, then all use the same value. Units: # $W/m/K$. set Thermal conductivities = 4.7 # List of viscosities for background mantle and compositional fields,for a # total of N+1 values, where N is the number of compositional fields.If # only one value is given, then all use the same value. Units: $Pa \, s$ set Viscosities = 1.e21 # When more than one compositional field is present at a point with # different viscosities, we need to come up with an average viscosity at # that point. Select a weighted harmonic, arithmetic, geometric, or # maximum composition. set Viscosity averaging scheme = harmonic end subsection Nondimensional model # Dissipation number. Pick 0.0 for incompressible computations. set Di = 0.0 # Rayleigh number Ra set Ra = 1e4 # Reference density $\rho_0$. Units: $kg/m^3$. set Reference density = 1.0 # The value of the specific heat $C_p$. Units: $J/kg/K$. set Reference specific heat = 1.0 # Whether to use the TALA instead of the ALA approximation. set Use TALA = false # Exponential depth prefactor for viscosity. set Viscosity depth prefactor = 0.0 # Exponential temperature prefactor for viscosity. set Viscosity temperature prefactor = 0.0 # Grueneisen parameter set gamma = 1.0 end subsection PerpleX lookup model # The value of the maximum pressure used to query PerpleX. Units: $Pa$. 
set Maximum material pressure = 1.e12

# The value of the maximum temperature used to query PerpleX. Units:
# $\si{K}$.
set Maximum material temperature = 6000.

# The value of the minimum pressure used to query PerpleX. Units: $Pa$.
set Minimum material pressure = 1.e5

# The value of the minimum temperature used to query PerpleX. Units:
# $\si{K}$.
set Minimum material temperature = 0.

# The name of the PerpleX input file (should end with .dat).
set PerpleX input file name = rock.dat

# The value of the thermal conductivity $k$. Units: $W/m/K$.
set Thermal conductivity = 4.7

# The value of the viscosity $\eta$. Units: $kg/m/s$.
set Viscosity = 5e24
end

subsection Replace lithosphere viscosity

# The name of a material model that will be modified by replacing the
# viscosity in the lithosphere with a constant value. Valid values for
# this parameter are the names of models that are also valid for the
# ``Material models/Model name'' parameter. See the documentation for more
# information.
set Base model = simple

# The path to the LAB depth data file.
set Data directory = $ASPECT_SOURCE_DIR/data/initial-temperature/lithosphere-mask/

# Method that is used to specify the depth of the
# lithosphere-asthenosphere boundary.
set Depth specification method = Value

# File from which the lithosphere-asthenosphere boundary depth data is
# read.
set LAB depth filename = LAB_CAM2016.txt

# The viscosity within the lithosphere, applied above the maximum
# lithosphere depth.
set Lithosphere viscosity = 1e23

# The maximum depth of the lithosphere. The model will be NaNs below this
# depth. Units: m.
set Maximum lithosphere depth = 200000.0
end

subsection Simple compressible model

# The value of the reference compressibility. Units: $1/Pa$.
set Reference compressibility = 4e-12

# Reference density $\rho_0$. Units: $kg/m^3$.
set Reference density = 3300.

# The value of the specific heat $C_p$. Units: $J/kg/K$.
set Reference specific heat = 1250.

# The value of the thermal conductivity $k$. Units: $W/m/K$.
set Thermal conductivity = 4.7

# The value of the thermal expansion coefficient $\alpha$. Units: $1/K$.
set Thermal expansion coefficient = 2e-5

# The value of the viscosity $\eta$. Units: $kg/m/s$ or $Pa s$.
set Viscosity = 1000000000000000000000.000000
end

subsection Simple model

# A linear dependency of viscosity on the first compositional field.
# Dimensionless prefactor. With a value of 1.0 (the default) the viscosity
# does not depend on the composition. See the general documentation of
# this model for a formula that states the dependence of the viscosity on
# this factor, which is called $\xi$ there.
set Composition viscosity prefactor = 1.0

# If compositional fields are used, then one would frequently want to make
# the density depend on these fields. In this simple material model, we
# make the following assumptions: if no compositional fields are used in
# the current simulation, then the density is simply the usual one with
# its linear dependence on the temperature. If there are compositional
# fields, then the material model determines how many of them influence
# the density. The composition-dependence adds a term of the kind $+\Delta
# \rho \; c_1(\mathbf x)$. This parameter describes the value of $\Delta
# \rho$. Units: $kg/m^3/\textrm{unit change in composition}$.
set Density differential for compositional field 1 = 0.

# The maximum value of the viscosity prefactor associated with temperature
# dependence.
set Maximum thermal prefactor = 1.0e6 # default: 1.0e2 # The minimum value of the viscosity prefactor associated with temperature # dependence. set Minimum thermal prefactor = 1.0e-6 # default: 1.0e-2 # Reference density $\rho_0$. Units: $kg/m^3$. set Reference density = 3300.0 # default: 3300 # The value of the specific heat $C_p$. Units: $J/kg/K$. set Reference specific heat = 1250.0 # default: 1250 # The reference temperature $T_0$. The reference temperature is used in # both the density and viscosity formulas. Units: $\si{K}$. set Reference temperature = 1600.0 # default: 293. # The value of the thermal conductivity $k$. Units: $W/m/K$. set Thermal conductivity = 1.0 # default: 4.7 # The value of the thermal expansion coefficient $\alpha$. Units: $1/K$. set Thermal expansion coefficient = 2.0e-5 # default: 2e-5 # The temperature dependence of viscosity. Dimensionless exponent. See the # general documentation of this model for a formula that states the # dependence of the viscosity on this factor, which is called $\beta$ # there. set Thermal viscosity exponent = 0.0 # The value of the constant viscosity $\eta_0$. This viscosity may be # modified by both temperature and compositional dependencies. Units: # $kg/m/s$. set Viscosity = 1.0 # default: 5e24 end subsection Simpler model # Reference density $\rho_0$. Units: $kg/m^3$. set Reference density = 3300 # The value of the specific heat $C_p$. Units: $J/kg/K$. set Reference specific heat = 1250 # The reference temperature $T_0$. The reference temperature is used in # the density formula. Units: $\si{K}$. set Reference temperature = 293. # The value of the thermal conductivity $k$. Units: $W/m/K$. set Thermal conductivity = 4.7 # The value of the thermal expansion coefficient $\alpha$. Units: $1/K$. set Thermal expansion coefficient = 2e-5 # The value of the viscosity $\eta$. Units: $kg/m/s$ or $Pa s$. set Viscosity = 5000000000000000452984832.000000 end subsection Steinberger model # Whether to use bilinear interpolation to compute material properties # (slower but more accurate). set Bilinear interpolation = true # The path to the model data. The path may also include the special text # '$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/material-model/steinberger/ # Whether to include latent heat effects in the calculation of thermal # expansivity and specific heat. Following the approach of Nakagawa et al. # 2009. set Latent heat = false # The file name of the lateral viscosity data. set Lateral viscosity file name = temp-viscosity-prefactor.txt # The file names of the material data (material data is assumed to be in # order with the ordering of the compositional fields). Note that there # are three options on how many files need to be listed here: 1. If only # one file is provided, it is used for the whole model domain, and # compositional fields are ignored. 2. If there is one more file name than # the number of compositional fields, then the first file is assumed to # define a `background composition' that is modified by the compositional # fields. 
# If there are exactly as many files as compositional fields, the fields
# are assumed to represent the fractions of different materials and the
# average property is computed as a sum of the value of the compositional
# field times the material property of that field.
set Material file names = pyr-ringwood88.txt

# The relative cutoff value for lateral viscosity variations caused by
# temperature deviations. The viscosity may vary laterally by this factor
# squared.
set Maximum lateral viscosity variation = 1e2

# The maximum viscosity that is allowed in the viscosity calculation.
# Larger values will be cut off.
set Maximum viscosity = 1e23

# The minimum viscosity that is allowed in the viscosity calculation.
# Smaller values will be cut off.
set Minimum viscosity = 1e19

# Number of bands to compute laterally averaged temperature within.
set Number lateral average bands = 10

# The file name of the radial viscosity data.
set Radial viscosity file name = radial-visc.txt

# The reference viscosity that is used for pressure scaling. To understand
# how pressure scaling works, take a look at \cite{KHB12}. In particular,
# the value of this parameter would not affect the solution computed by
# \aspect{} if we could do arithmetic exactly; however, computers do
# arithmetic in finite precision, and consequently we need to scale
# quantities in ways so that their magnitudes are roughly the same. As
# explained in \cite{KHB12}, we scale the pressure during some
# computations (never visible by users) by a factor that involves a
# reference viscosity. This parameter describes this reference viscosity.
#
# For problems with a constant viscosity, you will generally want to
# choose the reference viscosity equal to the actual viscosity. For
# problems with a variable viscosity, the reference viscosity should be a
# value that adequately represents the order of magnitude of the
# viscosities that appear, such as an average value or the value one would
# use to compute a Rayleigh number.
#
# Units: $Pa \, s$
set Reference viscosity = 1e23

# The value of the thermal conductivity $k$. Units: $W/m/K$.
set Thermal conductivity = 4.7

# Whether to use the laterally averaged temperature instead of the
# adiabatic temperature as the reference for the viscosity calculation.
# This ensures that the laterally averaged viscosities remain more or less
# constant over the model runtime. This behaviour might or might not be
# desired.
set Use lateral average temperature for viscosity = true
end

subsection Visco Plastic

# List of activation energies, $E_a$, for background material and
# compositional fields, for a total of N+1 values, where N is the number
# of compositional fields. If only one value is given, then all use the
# same value. Units: $J / mol$
set Activation energies for diffusion creep = 375e3

# List of activation energies, $E_a$, for background material and
# compositional fields, for a total of N+1 values, where N is the number
# of compositional fields. If only one value is given, then all use the
# same value. Units: $J / mol$
set Activation energies for dislocation creep = 530e3
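
# Illustration of the `N+1 values' convention used by the lists in this
# subsection (hypothetical setup, not part of this model): for a run with
# two compositional fields one could write
#   set Activation energies for diffusion creep = 375e3, 335e3, 430e3
# where, as the descriptions above suggest, the first entry applies to the
# background material and the remaining entries to the compositional
# fields in the order in which they are declared; a single entry is
# applied to all of them.
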
# List of activation volumes, $V_a$, for background material and
# compositional fields, for a total of N+1 values, where N is the number
# of compositional fields. If only one value is given, then all use the
# same value. Units: $m^3 / mol$
set Activation volumes for diffusion creep = 6e-6

# List of activation volumes, $V_a$, for background material and
# compositional fields, for a total of N+1 values, where N is the number
# of compositional fields. If only one value is given, then all use the
# same value. Units: $m^3 / mol$
set Activation volumes for dislocation creep = 1.4e-5

# Add an adiabatic temperature gradient to the temperature used in the
# flow law so that the activation volume is consistent with what one would
# use in an earth-like (compressible) model. Default is set so this is
# off. Note that this is a linear approximation of the real adiabatic
# gradient, which is okay for the upper mantle, but is not really accurate
# for the lower mantle. Using a pressure gradient of 32436 Pa/m, a value
# of 0.3 $K/km$ = 0.0003 $K/m$ = 9.24e-09 $K/Pa$ gives an earth-like
# adiabat. Units: $K/Pa$
set Adiabat temperature gradient for viscosity = 0.0

# List of angles of internal friction, $\phi$, for background material and
# compositional fields, for a total of N+1 values, where N is the number
# of compositional fields. For a value of zero, in 2D the von Mises
# criterion is retrieved. Angles higher than 30 degrees are harder to
# solve numerically. Units: degrees.
set Angles of internal friction = 0.

# List of cohesion strain weakening factors for background material and
# compositional fields, for a total of N+1 values, where N is the number
# of compositional fields. If only one value is given, then all use the
# same value. Units: None
set Cohesion strain weakening factors = 1.

# List of cohesions, $C$, for background material and compositional
# fields, for a total of N+1 values, where N is the number of
# compositional fields. The extremely large default cohesion value (1e20
# Pa) prevents the viscous stress from exceeding the yield stress. Units:
# $Pa$.
set Cohesions = 1e20

# List of constant viscosity prefactors (i.e., multiplicative factors) for
# background material and compositional fields, for a total of N+1 values,
# where N is the number of compositional fields. Units: none
set Constant viscosity prefactors = 1.0

# Whether to directly define thermal conductivities for each compositional
# field instead of calculating the values through the specified thermal
# diffusivities, densities, and heat capacities.
set Define thermal conductivities = false

# Whether to list phase transitions by depth or pressure. If this
# parameter is true, then the input file will use Phase transition depths
# and Phase transition widths to define the phase transition. If it is
# false, the parameter file will read in phase transition data from Phase
# transition pressures and Phase transition pressure widths.
set Define transition by depth instead of pressure = true

# List of densities for background mantle and compositional fields, for a
# total of N+M+1 values, where N is the number of compositional fields and
# M is the number of phases. If only one value is given, then all use the
# same value. Units: $kg / m^3$
set Densities = 3300.

# List of elastic shear moduli, $G$, for background material and
# compositional fields, for a total of N+1 values, where N is the number
# of compositional fields. The default value of 75 GPa is representative
# of mantle rocks. Units: Pa.
set Elastic shear moduli = 75.0e9 # List of strain weakening interval final strains for the cohesion and # friction angle parameters of the background material and compositional # fields, for a total of N+1 values, where N is the number of # compositional fields. If only one value is given, then all use the same # value. Units: None set End plasticity strain weakening intervals = 1. # List of strain weakening interval final strains for the diffusion and # dislocation prefactor parameters of the background material and # compositional fields, for a total of N+1 values, where N is the number # of compositional fields. If only one value is given, then all use the # same value. Units: None set End prefactor strain weakening intervals = 1. # The fixed elastic time step $dte$. Units: years if the 'Use years in # output instead of seconds' parameter is set; seconds otherwise. set Fixed elastic time step = 1.e3 # List of friction strain weakening factors for background material and # compositional fields, for a total of N+1 values, where N is the number # of compositional fields. If only one value is given, then all use the # same value. Units: None set Friction strain weakening factors = 1. # Units: $m$ set Grain size = 1e-3 # List of grain size exponents, $m_{\text{diffusion}}$, for background # material and compositional fields, for a total of N+1 values, where N is # the number of compositional fields. If only one value is given, then all # use the same value. Units: None set Grain size exponents for diffusion creep = 3. # List of specific heats $C_p$ for background mantle and compositional # fields,for a total of N+M+1 values, where N is the number of # compositional fields and M is the number of phases. If only one value is # given, then all use the same value. Units: $J /kg /K$ set Heat capacities = 1250. # Whether to include elastic effects in the rheological formulation. set Include viscoelasticity = false # Upper cutoff for effective viscosity. Units: $Pa \, s$ set Maximum viscosity = 1e28 # Limits the maximum value of the yield stress determined by the # drucker-prager plasticity parameters. Default value is chosen so this is # not automatically used. Values of 100e6--1000e6 $Pa$ have been used in # previous models. Units: $Pa$ set Maximum yield stress = 1e12 # Stabilizes strain dependent viscosity. Units: $1 / s$ set Minimum strain rate = 1.0e-20 # Lower cutoff for effective viscosity. Units: $Pa \, s$ set Minimum viscosity = 1e17 # A list of Clapeyron slopes for each phase transition. A positive # Clapeyron slope indicates that the phase transition will occur in a # greater depth, if the temperature is higher than the one given in Phase # transition temperatures and in a smaller depth, if the temperature is # smaller than the one given in Phase transition temperatures. For # negative slopes the other way round. List must have the same number of # entries as Phase transition depths. Units: $Pa/K$. set Phase transition Clapeyron slopes = # A list of depths where phase transitions occur. Values must # monotonically increase. Units: $m$. set Phase transition depths = # A list of widths for each phase transition, in terms of pressure. The # phase functions are scaled with these values, leading to a jump between # phases for a value of zero and a gradual transition for larger values. # List must have the same number of entries as Phase transition pressures. # Define transition by depth instead of pressure must be set to false to # use this parameter. Units: $Pa$. 
set Phase transition pressure widths = # A list of pressures where phase transitions occur. Values must # monotonically increase. Define transition by depth instead of pressure # must be set to false to use this parameter. Units: $Pa$. set Phase transition pressures = # A list of temperatures where phase transitions occur. Higher or lower # temperatures lead to phase transition occurring in smaller or greater # depths than given in Phase transition depths, depending on the Clapeyron # slope given in Phase transition Clapeyron slopes. List must have the # same number of entries as Phase transition depths. Units: $\si{K}$. set Phase transition temperatures = # A list of widths for each phase transition, in terms of depth. The phase # functions are scaled with these values, leading to a jump between phases # for a value of zero and a gradual transition for larger values. List # must have the same number of entries as Phase transition depths. Units: # $m$. set Phase transition widths = # List of viscous strain weakening factors for background material and # compositional fields, for a total of N+1 values, where N is the number # of compositional fields. If only one value is given, then all use the # same value. Units: None set Prefactor strain weakening factors = 1. # List of viscosity prefactors, $A$, for background material and # compositional fields, for a total of N+1 values, where N is the number # of compositional fields. If only one value is given, then all use the # same value. Units: $Pa^{-1} m^{m_{\text{diffusion}}} s^{-1}$ set Prefactors for diffusion creep = 1.5e-15 # List of viscosity prefactors, $A$, for background material and # compositional fields, for a total of N+1 values, where N is the number # of compositional fields. If only one value is given, then all use the # same value. Units: $Pa^{-n_{\text{dislocation}}} s^{-1}$ set Prefactors for dislocation creep = 1.1e-16 # Reference strain rate for first time step. Units: $1 / s$ set Reference strain rate = 1.0e-15 # For calculating density by thermal expansivity. Units: $\si{K}$ set Reference temperature = 293. # Reference viscosity for nondimensionalization. To understand how # pressure scaling works, take a look at \cite{KHB12}. In particular, the # value of this parameter would not affect the solution computed by # \aspect{} if we could do arithmetic exactly; however, computers do # arithmetic in finite precision, and consequently we need to scale # quantities in ways so that their magnitudes are roughly the same. As # explained in \cite{KHB12}, we scale the pressure during some # computations (never visible by users) by a factor that involves a # reference viscosity. This parameter describes this reference viscosity. # # For problems with a constant viscosity, you will generally want to # choose the reference viscosity equal to the actual viscosity. For # problems with a variable viscosity, the reference viscosity should be a # value that adequately represents the order of magnitude of the # viscosities that appear, such as an average value or the value one would # use to compute a Rayleigh number. # # Units: $Pa \, s$ set Reference viscosity = 1e22 # List of strain weakening interval initial strains for the cohesion and # friction angle parameters of the background material and compositional # fields, for a total of N+1 values, where N is the number of # compositional fields. If only one value is given, then all use the same # value. Units: None set Start plasticity strain weakening intervals = 0. 
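# As an illustration of how the phase transition parameters documented above
# fit together when transitions are defined by pressure rather than depth,
# the following commented-out sketch shows one possible combination. The
# numerical values are placeholders, not recommendations, and the pressure,
# width, temperature and Clapeyron slope lists must all have the same number
# of entries:
#
#   set Define transition by depth instead of pressure = false
#   set Phase transition pressures                     = 14e9, 23e9
#   set Phase transition pressure widths               = 5e8, 5e8
#   set Phase transition temperatures                  = 1800, 1900
#   set Phase transition Clapeyron slopes              = 3e6, -2e6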
# List of strain weakening interval initial strains for the diffusion and
# dislocation prefactor parameters of the background material and
# compositional fields, for a total of N+1 values, where N is the number of
# compositional fields. If only one value is given, then all use the same
# value. Units: None
set Start prefactor strain weakening intervals = 0.

# Whether to apply strain weakening to viscosity, cohesion and internal
# angle of friction based on accumulated finite strain, and if yes, which
# method to use. The following methods are available:
#
# \item ``none'': No strain weakening is applied.
#
# \item ``finite strain tensor'': The full finite strain tensor is tracked,
# and its second invariant is used to weaken both the plastic yield stress
# (specifically, the cohesion and friction angle) and the pre-yield
# viscosity that arises from diffusion and/or dislocation creep.
#
# \item ``total strain'': The finite strain is approximated as the product
# of the second invariant of the strain rate in each time step and the time
# step size, and this quantity is integrated and tracked over time. It is
# used to weaken both the plastic yield stress (specifically, the cohesion
# and friction angle) and the pre-yield viscosity.
#
# \item ``plastic weakening with plastic strain only'': The finite strain is
# approximated as the product of the second invariant of the strain rate in
# each time step and the time step size in regions where material is
# plastically yielding. This quantity is integrated and tracked over time,
# and used to weaken the cohesion and friction angle. The pre-yield
# viscosity is not weakened.
#
# \item ``plastic weakening with total strain only'': The finite strain is
# approximated as the product of the second invariant of the strain rate in
# each time step and the time step size, and this quantity is integrated and
# tracked over time. It is used to weaken the plastic yield stress
# (specifically, the cohesion and internal friction angle). The pre-yield
# viscosity is not weakened.
#
# \item ``plastic weakening with plastic strain and viscous weakening with
# viscous strain'': Both the finite strain accumulated by plastic
# deformation and by viscous deformation are computed separately (each
# approximated as the product of the second invariant of the corresponding
# strain rate in each time step and the time step size). The plastic strain
# is used to weaken the plastic yield stress (specifically, the cohesion and
# yield angle), and the viscous strain is used to weaken the pre-yield
# viscosity.
#
# \item ``viscous weakening with viscous strain only'': The finite strain is
# approximated as the product of the second invariant of the strain rate in
# each time step and the time step size in regions where material is not
# plastically yielding. This quantity is integrated and tracked over time,
# and used to weaken the pre-yield viscosity. The cohesion and friction
# angle are not weakened.
#
# \item ``default'': The default option has the same behavior as ``none'',
# but is there to make sure that the original parameters for specifying the
# strain weakening mechanism (``Use plastic/viscous strain weakening'') are
# still allowed, but to guarantee that one uses either the old parameter
# names or the new ones, never both.
#
# If a compositional field named 'noninitial\_plastic\_strain' is included
# in the parameter file, this field will automatically be excluded from
# volume fraction calculation and track the cumulative plastic strain with
# the initial plastic strain values removed.
set Strain weakening mechanism = default

# List of stress exponents, $n_{\text{dislocation}}$, for background
# material and compositional fields, for a total of N+1 values, where N is
# the number of compositional fields. If only one value is given, then all
# use the same value. Units: None
set Stress exponents for dislocation creep = 3.5

# List of stress limiter exponents, $n_{\text{lim}}$, for background
# material and compositional fields, for a total of N+1 values, where N is
# the number of compositional fields. Units: none.
set Stress limiter exponents = 1.0

# List of thermal conductivities, for background material and compositional
# fields, for a total of N+1 values, where N is the number of compositional
# fields. If only one value is given, then all use the same value. Units:
# $W/(m K)$
set Thermal conductivities = 3.0

# List of thermal diffusivities, for background material and compositional
# fields, for a total of N+1 values, where N is the number of compositional
# fields. If only one value is given, then all use the same value. Units:
# $m^2/s$
set Thermal diffusivities = 0.8e-6

# List of thermal expansivities for background mantle and compositional
# fields, for a total of N+M+1 values, where N is the number of
# compositional fields and M is the number of phases. If only one value is
# given, then all use the same value. Units: $1/K$
set Thermal expansivities = 0.000035

# Select whether the material time scale in the viscoelastic constitutive
# relationship uses the regular numerical time step or a separate fixed
# elastic time step throughout the model run. The fixed elastic time step is
# always used during the initial time step. If a fixed elastic time step is
# used throughout the model run, a stress averaging scheme can be applied to
# account for differences with the numerical time step. An alternative
# approach is to limit the maximum time step size so that it is equal to the
# elastic time step. The default value of this parameter is 'unspecified',
# which throws an exception during runtime. In order for the model to run
# the user must select 'true' or 'false'.
set Use fixed elastic time step = unspecified

# Whether to apply a stress averaging scheme to account for differences
# between the fixed elastic time step and numerical time step.
set Use stress averaging = false

# When more than one compositional field is present at a point with
# different viscosities, we need to come up with an average viscosity at
# that point. Select a weighted harmonic, arithmetic, geometric, or maximum
# composition.
set Viscosity averaging scheme = harmonic

# Select what type of viscosity law to use between diffusion, dislocation
# and composite options. Soon there will be an option to select a specific
# flow law for each assigned composition.
set Viscous flow law = composite

# Select what type of yield mechanism to use between Drucker Prager and
# stress limiter options.
set Yield mechanism = drucker
end

subsection Viscoelastic
# List of densities for background mantle and compositional fields, for a
# total of N+M+1 values, where N is the number of compositional fields and M
# is the number of phases. If only one value is given, then all use the same
# value. Units: $kg / m^3$
set Densities = 3300.
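# Since a value of `unspecified' for `Use fixed elastic time step' aborts
# the run (see the description of that parameter, repeated further below in
# this subsection), a viscoelastic setup has to choose the elastic time step
# handling explicitly. A minimal commented-out sketch with placeholder
# values only; the time step is given in years or seconds depending on the
# `Use years in output instead of seconds' parameter:
#
#   set Use fixed elastic time step = true
#   set Fixed elastic time step     = 1e3
#   set Use stress averaging        = true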
# List of elastic shear moduli, $G$, for background material and # compositional fields, for a total of N+1 values, where N is the number # of compositional fields. The default value of 75 GPa is representative # of mantle rocks. Units: Pa. set Elastic shear moduli = 75.0e9 # The fixed elastic time step $dte$. Units: years if the 'Use years in # output instead of seconds' parameter is set; seconds otherwise. set Fixed elastic time step = 1.e3 # List of specific heats $C_p$ for background mantle and compositional # fields,for a total of N+M+1 values, where N is the number of # compositional fields and M is the number of phases. If only one value is # given, then all use the same value. Units: $J /kg /K$ set Heat capacities = 1250. # The reference temperature $T_0$. Units: $\si{K}$. set Reference temperature = 293. # List of thermal conductivities for background mantle and compositional # fields, for a total of N+1 values, where N is the number of # compositional fields. If only one value is given, then all use the same # value. Units: $W/m/K$ set Thermal conductivities = 4.7 # List of thermal expansivities for background mantle and compositional # fields,for a total of N+M+1 values, where N is the number of # compositional fields and M is the number of phases. If only one value is # given, then all use the same value. Units: $1/K$ set Thermal expansivities = 0.000035 # Select whether the material time scale in the viscoelastic constitutive # relationship uses the regular numerical time step or a separate fixed # elastic time step throughout the model run. The fixed elastic time step # is always used during the initial time step. If a fixed elastic time # step is used throughout the model run, a stress averaging scheme can be # applied to account for differences with the numerical time step. An # alternative approach is to limit the maximum time step size so that it # is equal to the elastic time step. The default value of this parameter # is 'unspecified', which throws an exception during runtime. In order for # the model to run the user must select 'true' or 'false'. set Use fixed elastic time step = unspecified # Whether to apply a stress averaging scheme to account for differences # between the fixed elastic time step and numerical time step. set Use stress averaging = false # List of viscosities for background mantle and compositional fields, for # a total of N+1 values, where N is the number of compositional fields. If # only one value is given, then all use the same value. Units: $Pa s$ set Viscosities = 1.e21 # When more than one compositional field is present at a point with # different viscosities, we need to come up with an average viscosity at # that point. Select a weighted harmonic, arithmetic, geometric, or # maximum composition. set Viscosity averaging scheme = harmonic end end subsection Melt settings # Whether to cell-wise average the material properties that are used to # compute the melt velocity or not. The melt velocity is computed as the sum # of the solid velocity and the phase separation flux $ - K_D / \phi (\nabla # p_f - \rho_f \mathbf g)$. If this parameter is set to true, $K_D$ and # $\phi$ will be averaged cell-wise in the computation of the phase # separation flux. This is useful because in some models the melt velocity # can have spikes close to the interface between regions of melt and no # melt, as both $K_D$ and $\phi$ go to zero for vanishing melt fraction. 
# As the melt velocity is used for computing the time step size, and in
# models that use heat transport by melt or shear heating of melt, setting
# this parameter to true can speed up the model and make it more stable. In
# computations where accuracy and convergence behavior of the melt velocity
# is important (like in benchmark cases with an analytical solution), this
# parameter should probably be set to 'false'.
set Average melt velocity = true

# Whether to use a porosity weighted average of the melt and solid velocity
# to advect heat in the temperature equation or not. If this is set to true,
# additional terms are assembled on the left-hand side of the temperature
# advection equation. Only used if Include melt transport is true. If this
# is set to false, only the solid velocity is used (as in models without
# melt migration).
set Heat advection by melt = false

# Whether to include the transport of melt into the model or not. If this is
# set to true, two additional pressures (the fluid pressure and the
# compaction pressure) will be added to the finite element. Including melt
# transport in the simulation also requires that there is one compositional
# field that has the name `porosity'. This field will be used for computing
# the additional pressures and the melt velocity, and has a different
# advection equation than other compositional fields, as it is effectively
# advected with the melt velocity.
set Include melt transport = false

# The factor by which the Darcy coefficient K\_D in a cell can be smaller
# than the reference Darcy coefficient for this cell while still being
# considered a melt cell (for which the melt transport equations are
# solved). For smaller Darcy coefficients, the Stokes equations (without
# melt) are solved instead. Only used if ``Include melt transport'' is true.
set Melt scaling factor threshold = 1e-7

# Whether to use a discontinuous element for the compaction pressure or not.
# From our preliminary tests, continuous elements seem to work better in
# models where the porosity is > 0 everywhere in the domain, and
# discontinuous elements work better in models where in parts of the domain
# the porosity = 0.
set Use discontinuous compaction pressure = true
end

subsection Mesh deformation
# A comma separated list of names denoting those boundaries where the mesh
# is allowed to move tangentially to the boundary. All tangential mesh
# movements along those boundaries that have tangential material velocity
# boundary conditions are allowed by default; this parameter allows mesh
# movements to be generated along other boundaries that are open, or have
# prescribed material velocities or tractions.
#
# The names of the boundaries listed here can either be numbers (in which
# case they correspond to the numerical boundary indicators assigned by the
# geometry object), or they can correspond to any of the symbolic names the
# geometry object may have provided for each part of the boundary. You may
# want to compare this with the documentation of the geometry model you use
# in your model.
set Additional tangential mesh velocity boundary indicators =

# A comma separated list of names denoting those boundaries where the mesh
# is allowed to move according to the specified mesh deformation objects.
# # The names of the boundaries listed here can either be numbers (in which # case they correspond to the numerical boundary indicators assigned by the # geometry object), or they can correspond to any of the symbolic names the # geometry object may have provided for each part of the boundary. You may # want to compare this with the documentation of the geometry model you use # in your model. # # The format is id1: object1 \& object2, id2: object3 \& object2, where # objects are one of `boundary function': A plugin, which prescribes the # surface mesh to deform according to an analytically prescribed function. # Note that the function prescribes a deformation velocity, i.e. the return # value of this plugin is later multiplied by the time step length to # compute the displacement increment in this time step. Although the # function's time variable is interpreted as years when Use years in output # instead of seconds is set to true, the boundary deformation velocity # should still be given in m/s. The format of the functions follows the # syntax understood by the muparser library, see # Section~\ref{sec:muparser-format}. # # `free surface': A plugin that computes the deformation of surface vertices # according to the solution of the flow problem. In particular this means if # the surface of the domain is left open to flow, this flow will carry the # mesh with it. The implementation was described in \cite{rose_freesurface}, # with the stabilization of the free surface originally described in # \cite{KMM2010}. set Mesh deformation boundary indicators = subsection Boundary function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0; 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. 
For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection Free surface # Theta parameter described in \cite{KMM2010}. An unstabilized free # surface can overshoot its equilibrium position quite easily and generate # unphysical results. One solution is to use a quasi-implicit correction # term to the forces near the free surface. This parameter describes how # much the free surface is stabilized with this term, where zero is no # stabilization, and one is fully implicit. set Free surface stabilization theta = 0.5 # After each time step the free surface must be advected in the direction # of the velocity field. Mass conservation requires that the mesh velocity # is in the normal direction of the surface. However, for steep topography # or large curvature, advection in the normal direction can become # ill-conditioned, and instabilities in the mesh can form. Projection of # the mesh velocity onto the local vertical direction can preserve the # mesh quality better, but at the cost of slightly poorer mass # conservation of the domain. set Surface velocity projection = normal end end subsection Mesh refinement # Use fraction of the total number of cells instead of fraction of the total # error as the limit for refinement and coarsening. set Adapt by fraction of cells = false # A list of times so that if the end time of a time step is beyond this # time, an additional round of mesh refinement is triggered. This is mostly # useful to make sure we can get through the initial transient phase of a # simulation on a relatively coarse mesh, and then refine again when we are # in a time range that we are interested in and where we would like to use a # finer mesh. Units: Each element of the list has units years if the 'Use # years in output instead of seconds' parameter is set; seconds otherwise. set Additional refinement times = # The fraction of cells with the smallest error that should be flagged for # coarsening. set Coarsening fraction = 0.05 # The number of adaptive refinement steps performed after initial global # refinement but while still within the first time step. set Initial adaptive refinement = 0 # The number of global refinement steps performed on the initial coarse # mesh, before the problem is first solved there. set Initial global refinement = 8 # default: 2 # The minimum refinement level each cell should have, and that can not be # exceeded by coarsening. Should not be higher than the 'Initial global # refinement' parameter. set Minimum refinement level = 0 # If multiple refinement criteria are specified in the ``Strategy'' # parameter, then they need to be combined somehow to form the final # refinement indicators. This is done using the method described by the # ``Refinement criteria merge operation'' parameter which can either operate # on the raw refinement indicators returned by each strategy (i.e., # dimensional quantities) or using normalized values where the indicators of # each strategy are first normalized to the interval $[0,1]$ (which also # makes them non-dimensional). This parameter determines whether this # normalization will happen. 
set Normalize individual refinement criteria = true

# If multiple mesh refinement criteria are computed for each cell (by
# passing a list of more than one element to the \texttt{Strategy} parameter
# in this section of the input file) then one will have to decide which one
# should win when deciding which cell to refine. The operation that selects
# from these competing criteria is the one that is selected here. The
# options are:
#
# \begin{itemize}
# \item \texttt{plus}: Add the various error indicators together and refine
# those cells on which the sum of indicators is largest.
# \item \texttt{max}: Take the maximum of the various error indicators and
# refine those cells on which the maximal indicator is largest.
# \end{itemize}
# The refinement indicators computed by each strategy are modified by the
# ``Normalize individual refinement criteria'' and ``Refinement criteria
# scaling factors'' parameters.
set Refinement criteria merge operation = max

# A list of scaling factors by which every individual refinement criterion
# will be multiplied. If only a single refinement criterion is selected
# (using the ``Strategy'' parameter), then this parameter has no particular
# meaning. On the other hand, if multiple criteria are chosen, then these
# factors are used to weigh the various indicators relative to each other.
#
# If ``Normalize individual refinement criteria'' is set to true, then the
# criteria will first be normalized to the interval $[0,1]$ and then
# multiplied by the factors specified here. You will likely want to choose
# the factors to be not too far from 1 in that case, say between 1 and 10,
# to avoid essentially disabling those criteria with small weights. On the
# other hand, if the criteria are not normalized to $[0,1]$ using the
# parameter mentioned above, then the factors you specify here need to take
# into account the relative numerical size of refinement indicators (which
# in that case carry physical units).
#
# You can experimentally play with these scaling factors by choosing to
# output the refinement indicators into the graphical output of a run.
#
# If the list of indicators given in this parameter is empty, then this
# indicates that they should all be chosen equal to one. If the list is not
# empty then it needs to have as many entries as there are indicators chosen
# in the ``Strategy'' parameter.
set Refinement criteria scaling factors =

# The fraction of cells with the largest error that should be flagged for
# refinement.
set Refinement fraction = 0.3

# Whether or not the postprocessors should be executed after each of the
# initial adaptive refinement cycles that are run at the start of the
# simulation.
set Run postprocessors on initial refinement = false

# Whether or not the initial conditions should be set up during the
# adaptive refinement cycles that are run at the start of the simulation.
set Skip setup initial conditions on initial refinement = false

# Whether or not solvers should be executed during the initial adaptive
# refinement cycles that are run at the start of the simulation.
set Skip solvers on initial refinement = false

# A comma separated list of mesh refinement criteria that will be run
# whenever mesh refinement is required. The results of each of these
# criteria, i.e., the refinement indicators they produce for all the cells
# of the mesh will then be normalized to a range between zero and one and
# the results of different criteria will then be merged through the
# operation selected in this section.
# # The following criteria are available: # # `artificial viscosity': A mesh refinement criterion that computes # refinement indicators from the artificial viscosity of the temperature or # compositional fields based on user specified weights. # # `boundary': A class that implements a mesh refinement criterion which # always flags all cells on specified boundaries for refinement. This is # useful to provide high accuracy for processes at or close to the edge of # the model domain. # # To use this refinement criterion, you may want to combine it with other # refinement criteria, setting the 'Normalize individual refinement # criteria' flag and using the `max' setting for 'Refinement criteria merge # operation'. # # `compaction length': A mesh refinement criterion for models with melt # transport that computes refinement indicators based on the compaction # length, defined as $\delta = \sqrt{\frac{(\xi + 4 \eta/3) k}{\eta_f}}$. # $\xi$ is the bulk viscosity, $\eta$ is the shear viscosity, $k$ is the # permeability and $\eta_f$ is the melt viscosity. If the cell width or # height exceeds a multiple (which is specified as an input parameter) of # this compaction length, the cell is marked for refinement. # # `composition': A mesh refinement criterion that computes refinement # indicators from the compositional fields. If there is more than one # compositional field, then it simply takes the sum of the indicators # computed from each of the compositional field. # # The way these indicators are computed is by evaluating the `Kelly error # indicator' on each compositional field. This error indicator takes the # finite element approximation of the compositional field and uses it to # compute an approximation of the second derivatives of the composition for # each cell. This approximation is then multiplied by an appropriate power # of the cell's diameter to yield an indicator for how large the error is # likely going to be on this cell. This construction rests on the # observation that for many partial differential equations, the error on # each cell is proportional to some power of the cell's diameter times the # second derivatives of the solution on that cell. # # For complex equations such as those we solve here, this observation may # not be strictly true in the mathematical sense, but it often yields meshes # that are surprisingly good. # # `composition approximate gradient': A mesh refinement criterion that # computes refinement indicators from the gradients of compositional fields. # If there is more than one compositional field, then it simply takes the # sum of the indicators times a user-specified weight for each field. # # In contrast to the `composition gradient' refinement criterion, the # current criterion does not compute the gradient at quadrature points on # each cell, but by a finite difference approximation between the centers of # cells. Consequently, it also works if the compositional fields are # computed using discontinuous finite elements. # # `composition gradient': A mesh refinement criterion that computes # refinement indicators from the gradients of compositional fields. If there # is more than one compositional field, then it simply takes the sum of the # indicators times a user-specified weight for each field. # # This refinement criterion computes the gradient of the compositional field # at quadrature points on each cell, and then averages them in some way to # obtain a refinement indicator for each cell. 
This will give a reasonable # approximation of the true gradient of the compositional field if you are # using a continuous finite element. # # On the other hand, for discontinuous finite elements (see the `Use # discontinuous composition discretization' parameter in the # `Discretization' section), the gradient at quadrature points does not # include the contribution of jumps in the compositional field between # cells, and consequently will not be an accurate approximation of the true # gradient. As an extreme example, consider the case of using piecewise # constant finite elements for compositional fields; in that case, the # gradient of the solution at quadrature points inside each cell will always # be exactly zero, even if the finite element solution is different from # each cell to the next. Consequently, the current refinement criterion will # likely not be useful in this situation. That said, the `composition # approximate gradient' refinement criterion exists for exactly this # purpose. # # `composition threshold': A mesh refinement criterion that computes # refinement indicators from the compositional fields. If any field exceeds # the threshold given in the input file, the cell is marked for refinement. # # `density': A mesh refinement criterion that computes refinement indicators # from a field that describes the spatial variability of the density, # $\rho$. Because this quantity may not be a continuous function ($\rho$ and # $C_p$ may be discontinuous functions along discontinuities in the medium, # for example due to phase changes), we approximate the gradient of this # quantity to refine the mesh. The error indicator defined here takes the # magnitude of the approximate gradient and scales it by $h_K^{1+d/2}$ where # $h_K$ is the diameter of each cell and $d$ is the dimension. This scaling # ensures that the error indicators converge to zero as $h_K\rightarrow 0$ # even if the energy density is discontinuous, since the gradient of a # discontinuous function grows like $1/h_K$. # # `maximum refinement function': A mesh refinement criterion that ensures a # maximum refinement level described by an explicit formula with the depth # or position as argument. Which coordinate representation is used is # determined by an input parameter. Whatever the coordinate system chosen, # the function you provide in the input file will by default depend on # variables `x', `y' and `z' (if in 3d). However, the meaning of these # symbols depends on the coordinate system. In the Cartesian coordinate # system, they simply refer to their natural meaning. If you have selected # `depth' for the coordinate system, then `x' refers to the depth variable # and `y' and `z' will simply always be zero. If you have selected a # spherical coordinate system, then `x' will refer to the radial distance of # the point to the origin, `y' to the azimuth angle and `z' to the polar # angle measured positive from the north pole. Note that the order of # spherical coordinates is r,phi,theta and not r,theta,phi, since this # allows for dimension independent expressions. Each coordinate system also # includes a final `t' variable which represents the model time, evaluated # in years if the 'Use years in output instead of seconds' parameter is set, # otherwise evaluated in seconds. After evaluating the function, its values # are rounded to the nearest integer. # # The format of these functions follows the syntax understood by the # muparser library, see Section~\ref{sec:muparser-format}. 
#
# `minimum refinement function': A mesh refinement criterion that ensures a
# minimum refinement level described by an explicit formula with the depth
# or position as argument. Which coordinate representation is used is
# determined by an input parameter. Whatever the coordinate system chosen,
# the function you provide in the input file will by default depend on
# variables `x', `y' and `z' (if in 3d). However, the meaning of these
# symbols depends on the coordinate system. In the Cartesian coordinate
# system, they simply refer to their natural meaning. If you have selected
# `depth' for the coordinate system, then `x' refers to the depth variable
# and `y' and `z' will simply always be zero. If you have selected a
# spherical coordinate system, then `x' will refer to the radial distance of
# the point to the origin, `y' to the azimuth angle and `z' to the polar
# angle measured positive from the north pole. Note that the order of
# spherical coordinates is r,phi,theta and not r,theta,phi, since this
# allows for dimension independent expressions. Each coordinate system also
# includes a final `t' variable which represents the model time, evaluated
# in years if the 'Use years in output instead of seconds' parameter is set,
# otherwise evaluated in seconds. After evaluating the function, its values
# are rounded to the nearest integer.
#
# The format of these functions follows the syntax understood by the
# muparser library, see Section~\ref{sec:muparser-format}.
#
# `nonadiabatic temperature': A mesh refinement criterion that computes
# refinement indicators from the excess temperature (the difference between
# temperature and adiabatic temperature).
#
# `particle density': A mesh refinement criterion that computes refinement
# indicators based on the density of particles. In practice this plugin
# equilibrates the number of particles per cell, leading to fine cells in
# high particle density regions and coarse cells in low particle density
# regions. This plugin is mostly useful for models with inhomogeneous
# particle density, e.g. when tracking an initial interface with a high
# particle density, or when the spatial particle density denotes the region
# of interest. Additionally, this plugin tends to balance the computational
# load between processes in parallel computations, because the particle and
# mesh density are more closely aligned.
#
# `slope': A class that implements a mesh refinement criterion intended for
# use with deforming mesh boundaries, like the free surface. It calculates a
# local slope based on the angle between the surface normal and the local
# gravity vector. Cells with larger angles are marked for refinement.
#
# To use this refinement criterion, you may want to combine it with other
# refinement criteria, setting the 'Normalize individual refinement
# criteria' flag and using the `max' setting for 'Refinement criteria merge
# operation'.
#
# `strain rate': A mesh refinement criterion that computes the refinement
# indicators equal to the strain rate norm computed at the center of the
# elements.
#
# `temperature': A mesh refinement criterion that computes refinement
# indicators from the temperature field.
#
# The way these indicators are computed is by evaluating the `Kelly error
# indicator' on the temperature field. This error indicator takes the finite
# element approximation of the temperature field and uses it to compute an
# approximation of the second derivatives of the temperature for each cell.
# This approximation is then multiplied by an appropriate power of the # cell's diameter to yield an indicator for how large the error is likely # going to be on this cell. This construction rests on the observation that # for many partial differential equations, the error on each cell is # proportional to some power of the cell's diameter times the second # derivatives of the solution on that cell. # # For complex equations such as those we solve here, this observation may # not be strictly true in the mathematical sense, but it often yields meshes # that are surprisingly good. # # `thermal energy density': A mesh refinement criterion that computes # refinement indicators from a field that describes the spatial variability # of the thermal energy density, $\rho C_p T$. Because this quantity may not # be a continuous function ($\rho$ and $C_p$ may be discontinuous functions # along discontinuities in the medium, for example due to phase changes), we # approximate the gradient of this quantity to refine the mesh. The error # indicator defined here takes the magnitude of the approximate gradient and # scales it by $h_K^{1.5}$ where $h_K$ is the diameter of each cell. This # scaling ensures that the error indicators converge to zero as # $h_K\rightarrow 0$ even if the energy density is discontinuous, since the # gradient of a discontinuous function grows like $1/h_K$. # # `topography': A class that implements a mesh refinement criterion, which # always flags all cells in the uppermost layer for refinement. This is # useful to provide high accuracy for processes at or close to the surface. # # To use this refinement criterion, you may want to combine it with other # refinement criteria, setting the 'Normalize individual refinement # criteria' flag and using the `max' setting for 'Refinement criteria merge # operation'. # # `velocity': A mesh refinement criterion that computes refinement # indicators from the velocity field. # # The way these indicators are computed is by evaluating the `Kelly error # indicator' on the velocity field. This error indicator takes the finite # element approximation of the velocity field and uses it to compute an # approximation of the second derivatives of the velocity for each cell. # This approximation is then multiplied by an appropriate power of the # cell's diameter to yield an indicator for how large the error is likely # going to be on this cell. This construction rests on the observation that # for many partial differential equations, the error on each cell is # proportional to some power of the cell's diameter times the second # derivatives of the solution on that cell. # # For complex equations such as those we solve here, this observation may # not be strictly true in the mathematical sense, but it often yields meshes # that are surprisingly good. # # `viscosity': A mesh refinement criterion that computes refinement # indicators from a field that describes the spatial variability of the # logarithm of the viscosity, $\log\eta$. (We choose the logarithm of the # viscosity because it can vary by orders of magnitude.)Because this # quantity may not be a continuous function ($\eta$ may be a discontinuous # function along discontinuities in the medium, for example due to phase # changes), we approximate the gradient of this quantity to refine the mesh. # The error indicator defined here takes the magnitude of the approximate # gradient and scales it by $h_K^{1+d/2}$ where $h_K$ is the diameter of # each cell and $d$ is the dimension. 
This scaling ensures that the error # indicators converge to zero as $h_K\rightarrow 0$ even if the energy # density is discontinuous, since the gradient of a discontinuous function # grows like $1/h_K$. # # `volume of fluid interface': A class that implements a mesh refinement # criterion, which ensures a minimum level of refinement near the volume of # fluid interface boundary. set Strategy = thermal energy density # The number of time steps after which the mesh is to be adapted again based # on computed error indicators. If 0 then the mesh will never be changed. set Time steps between mesh refinement = 0 # default: 10 subsection Artificial viscosity # A list of scaling factors by which every individual compositional field # will be multiplied. These factors are used to weigh the various # indicators relative to each other and to the temperature. # # If the list of scaling factors given in this parameter is empty, then # this indicates that they should all be chosen equal to 0. If the list is # not empty then it needs to have as many entries as there are # compositional fields. set Compositional field scaling factors = # A scaling factor for the artificial viscosity of the temperature # equation. Use 0.0 to disable. set Temperature scaling factor = 0.0 end subsection Boundary # A comma separated list of names denoting those boundaries where there # should be mesh refinement. # # The names of the boundaries listed here can either be numbers (in which # case they correspond to the numerical boundary indicators assigned by # the geometry object), or they can correspond to any of the symbolic # names the geometry object may have provided for each part of the # boundary. You may want to compare this with the documentation of the # geometry model you use in your model. set Boundary refinement indicators = end subsection Compaction length # The desired ratio between compaction length and size of the mesh cells, # or, in other words, how many cells the mesh should (at least) have per # compaction length. Every cell where this ratio is smaller than the value # specified by this parameter (in places with fewer mesh cells per # compaction length) is marked for refinement. set Mesh cells per compaction length = 1.0 end subsection Composition # A list of scaling factors by which every individual compositional field # will be multiplied. If only a single compositional field exists, then # this parameter has no particular meaning. On the other hand, if multiple # criteria are chosen, then these factors are used to weigh the various # indicators relative to each other. # # If the list of scaling factors given in this parameter is empty, then # this indicates that they should all be chosen equal to one. If the list # is not empty then it needs to have as many entries as there are # compositional fields. set Compositional field scaling factors = end subsection Composition approximate gradient # A list of scaling factors by which every individual compositional field # gradient will be multiplied. If only a single compositional field # exists, then this parameter has no particular meaning. On the other # hand, if multiple criteria are chosen, then these factors are used to # weigh the various indicators relative to each other. # # If the list of scaling factors given in this parameter is empty, then # this indicates that they should all be chosen equal to one. If the list # is not empty then it needs to have as many entries as there are # compositional fields. 
set Compositional field scaling factors = end subsection Composition gradient # A list of scaling factors by which every individual compositional field # gradient will be multiplied. If only a single compositional field # exists, then this parameter has no particular meaning. On the other # hand, if multiple criteria are chosen, then these factors are used to # weigh the various indicators relative to each other. # # If the list of scaling factors given in this parameter is empty, then # this indicates that they should all be chosen equal to one. If the list # is not empty then it needs to have as many entries as there are # compositional fields. set Compositional field scaling factors = end subsection Composition threshold # A list of thresholds that every individual compositional field will be # evaluated against. set Compositional field thresholds = end subsection Maximum refinement function # A selection that determines the assumed coordinate system for the # function variables. Allowed values are `depth', `cartesian' and # `spherical'. `depth' will create a function, in which only the first # variable is non-zero, which is interpreted to be the depth of the point. # `spherical' coordinates are interpreted as r,phi or r,phi,theta in 2D/3D # respectively with theta being the polar angle. set Coordinate system = depth # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. 
set Variable names = x,y,t end subsection Minimum refinement function # A selection that determines the assumed coordinate system for the # function variables. Allowed values are `depth', `cartesian' and # `spherical'. `depth' will create a function, in which only the first # variable is non-zero, which is interpreted to be the depth of the point. # `spherical' coordinates are interpreted as r,phi or r,phi,theta in 2D/3D # respectively with theta being the polar angle. set Coordinate system = depth # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection Volume of fluid interface # If true, then explicitly coarsen any cells not neighboring the # VolumeOfFluid interface. set Strict coarsening = false end end subsection Nullspace removal # Choose none, one or several from # # \begin{itemize} \item net rotation \item angular momentum \item net # translation \item linear momentum \item net x translation \item net y # translation \item net z translation \item linear x momentum \item linear y # momentum \item linear z momentum \end{itemize} # # These are a selection of operations to remove certain parts of the # nullspace from the velocity after solving. For some geometries and certain # boundary conditions the velocity field is not uniquely determined but # contains free translations and/or rotations. 
Depending on what you specify # here, these non-determined modes will be removed from the velocity field # at the end of the Stokes solve step. # # # The ``angular momentum'' option removes a rotation such that the net # angular momentum is zero. The ``linear * momentum'' options remove # translations such that the net momentum in the relevant direction is zero. # The ``net rotation'' option removes the net rotation of the domain, and # the ``net * translation'' options remove the net translations in the # relevant directions. For most problems there should not be a significant # difference between the momentum and rotation/translation versions of # nullspace removal, although the momentum versions are more physically # motivated. They are equivalent for constant density simulations, and # approximately equivalent when the density variations are small. # # Note that while more than one operation can be selected it only makes # sense to pick one rotational and one translational operation. set Remove nullspace = net rotation # default: end subsection Postprocess # A comma separated list of postprocessor objects that should be run at the # end of each time step. Some of these postprocessors will declare their own # parameters which may, for example, include that they will actually do # something only every so many time steps or years. Alternatively, the text # `all' indicates that all available postprocessors should be run after each # time step. # # The following postprocessors are available: # # `Stokes residual': A postprocessor that outputs the Stokes residuals # during the iterative solver algorithm into a file stokes_residuals.txt in # the output directory. # # `basic statistics': A postprocessor that outputs some simplified # statistics like the Rayleigh number and other quantities that only make # sense in certain model setups. The output is written after completing # initial adaptive refinement steps. The postprocessor assumes a point at # the surface at the adiabatic surface temperature and pressure is a # reasonable reference condition for computing these properties. # Furthermore, the Rayleigh number is computed using the model depth (i.e. # not the radius of the Earth), as we need a definition that is geometry # independent. Take care when comparing these values to published studies # and make sure they use the same definitions. # # `boundary densities': A postprocessor that computes the laterally averaged # density at the top and bottom of the domain. # # `boundary pressures': A postprocessor that computes the laterally averaged # pressure at the top and bottom of the domain. # # `command': A postprocessor that executes a command line process. # # `composition statistics': A postprocessor that computes some statistics # about the compositional fields, if present in this simulation. In # particular, it computes maximal and minimal values of each field, as well # as the total mass contained in this field as defined by the integral # $m_i(t) = \int_\Omega c_i(\mathbf x,t) \; \text{d}x$. # # `core statistics': A postprocessor that computes some statistics about the # core evolution. (Working only with dynamic core boundary temperature # plugin) # # `depth average': A postprocessor that computes depth averaged quantities # and writes them into a file in the output directory, # where the extension of the file is determined by the output format you # select. 
In addition to the output format, a number of other parameters # also influence this postprocessor, and they can be set in the section # \texttt{Postprocess/Depth average} in the input file. # # In the output files, the $x$-value of each data point corresponds to the # depth, whereas the $y$-value corresponds to the simulation time. The time # is provided in seconds or, if the global ``Use years in output instead of # seconds'' parameter is set, in years. # # `dynamic topography': A postprocessor that computes a measure of dynamic # topography based on the stress at the surface and bottom. The data is # written into text files named `dynamic\_topography.NNNNN' in the output # directory, where NNNNN is the number of the time step. # # The exact approach works as follows: At the centers of all cells that sit # along the top surface, we evaluate the stress and evaluate the component # of it in the direction in which gravity acts. In other words, we compute # $\sigma_{rr}={\hat g}^T(2 \eta \varepsilon(\mathbf u)- \frac 13 # (\textrm{div}\;\mathbf u)I)\hat g - p_d$ where $\hat g = \mathbf # g/\|\mathbf g\|$ is the direction of the gravity vector $\mathbf g$ and # $p_d=p-p_a$ is the dynamic pressure computed by subtracting the adiabatic # pressure $p_a$ from the total pressure $p$ computed as part of the Stokes # solve. From this, the dynamic topography is computed using the formula # $h=\frac{\sigma_{rr}}{(\mathbf g \cdot \mathbf n) \rho}$ where $\rho$ is # the density at the cell center. For the bottom surface we chose the # convention that positive values are up (out) and negative values are in # (down), analogous to the deformation of the upper surface. Note that this # implementation takes the direction of gravity into account, which means # that reversing the flow in backward advection calculations will not # reverse the instantaneous topography because the reverse flow will be # divided by the reverse surface gravity. # The file format then consists of lines with Euclidean coordinates followed # by the corresponding topography value. # # (As a side note, the postprocessor chooses the cell center instead of the # center of the cell face at the surface, where we really are interested in # the quantity, since this often gives better accuracy. The results should # in essence be the same, though.) # # `entropy viscosity statistics': A postprocessor that computes the maximum # and volume averaged entropy viscosity stabilization for the temperature # field. # # `geoid': A postprocessor that computes a representation of the geoid based # on the density structure in the mantle, as well as the dynamic topography # at the surface and core mantle boundary (CMB). The geoid is computed from # a spherical harmonic expansion, so the geometry of the domain must be a 3D # spherical shell. # # `global statistics': A postprocessor that outputs all the global # statistics information, e.g. the time of the simulation, the timestep # number, number of degrees of freedom and solver iterations for each # timestep. The postprocessor can produce output in two different formats: the first # prints one line in the statistics file per nonlinear solver iteration # (if a nonlinear solver scheme is selected). The second prints one line per # timestep, summing the information about all nonlinear iterations in this # line. Note that this postprocessor is always active independent of whether # or not it is selected in the parameter file.
# # `gravity calculation': A postprocessor that computes gravity, gravity # anomalies, gravity potential and gravity gradients for a set of points # (e.g. satellites) in or above the model surface for either a user-defined # range of latitudes, longitudes and radius or a list of point # coordinates. Spherical coordinates in the output file are radius, # colatitude and colongitude. Gravity is here based on the density # distribution from the material model (and non-adiabatic). This means that # the density may come directly from an ascii file. This postprocessor also # computes theoretical gravity and its derivatives, which correspond to the # analytical solution of gravity in the same geometry but filled with a # reference density. The reference density is also used to determine density # anomalies for computing gravity anomalies. Thus one must carefully # evaluate the meaning of the gravity anomaly output, because the solution # may not reflect the actual gravity anomaly (due to differences in the # assumed reference density). One way to guarantee correct gravity anomalies # is to subtract the gravity of a certain point from the average gravity on the # map. Another way is to directly use density anomalies for this # postprocessor. The average, minimum, and maximum gravity acceleration and # potential are written into the statistics file. # # `heat flux densities': A postprocessor that computes some statistics about # the heat flux density for each boundary id. The heat flux density across # each boundary is computed in outward direction, i.e., from the domain to # the outside. The heat flux is computed as sum of advective heat flux and # conductive heat flux through Neumann boundaries, both computed as integral # over the boundary area, and conductive heat flux through Dirichlet # boundaries, which is computed using the consistent boundary flux method as # described in ``Gresho, Lee, Sani, Maslanik, Eaton (1987). The consistent # Galerkin FEM for computing derived boundary quantities in thermal and or # fluids problems. International Journal for Numerical Methods in Fluids, # 7(4), 371-394.'' # # Note that the ``heat flux statistics'' postprocessor computes the same # quantity as the one here, but not divided by the area of the surface. In # other words, it computes the \textit{total} heat flux through each # boundary. # # `heat flux map': A postprocessor that computes the heat flux density # across each boundary in outward direction, i.e., from the domain to the # outside. The heat flux is computed as sum of advective heat flux and # conductive heat flux through Neumann boundaries, both computed as integral # over the boundary area, and conductive heat flux through Dirichlet # boundaries, which is computed using the consistent boundary flux method as # described in ``Gresho, Lee, Sani, Maslanik, Eaton (1987). The consistent # Galerkin FEM for computing derived boundary quantities in thermal and or # fluids problems. International Journal for Numerical Methods in Fluids, # 7(4), 371-394.'' # # `heat flux statistics': A postprocessor that computes some statistics # about the heat flux density across each boundary in outward direction, # i.e., from the domain to the outside.
The heat flux is computed as sum of # advective heat flux and conductive heat flux through Neumann boundaries, # both computed as integral over the boundary area, and conductive heat flux # through Dirichlet boundaries, which is computed using the consistent # boundary flux method as described in ``Gresho, Lee, Sani, Maslanik, Eaton # (1987). The consistent Galerkin FEM for computing derived boundary # quantities in thermal and or fluids problems. International Journal for # Numerical Methods in Fluids, 7(4), 371-394.'' The point-wise heat flux can # be obtained from the heat flux map postprocessor, which outputs the heat # flux to a file, or the heat flux map visualization postprocessor, which # outputs the heat flux for visualization. # # As stated, this postprocessor computes the \textit{outbound} heat flux. If # you are interested in the opposite direction, for example from the core # into the mantle when the domain describes the mantle, then you need to # multiply the result by -1. # # \note{In geodynamics, the term ``heat flux'' is often understood to be the # quantity $- k \nabla T$, which is really a heat flux \textit{density}, # i.e., a vector-valued field. In contrast to this, the current # postprocessor only computes the integrated flux over each part of the # boundary. Consequently, the units of the quantity computed here are # $W=\frac{J}{s}$.} # # The ``heat flux densities'' postprocessor computes the same quantity as # the one here, but divided by the area of the surface. # # `heating statistics': A postprocessor that computes some statistics about # heating, averaged by volume. # # `load balance statistics': A postprocessor that computes statistics about # the distribution of cells, and, if present, particles across subdomains. In # particular, it computes maximal, average and minimal number of cells # across all ranks. If there are particles it also computes the maximal, # average, and minimum number of particles across all ranks, and maximal, # average, and minimal ratio between local number of particles and local # number of cells across all processes. All of these numbers can be useful # to assess the load balance between different MPI ranks, as the difference # between the minimal and maximal load should be as small as possible. # # `mass flux statistics': A postprocessor that computes some statistics # about the mass flux across boundaries. For each boundary indicator (see # your geometry description for which boundary indicators are used), the # mass flux is computed in outward direction, i.e., from the domain to the # outside, using the formula $\int_{\Gamma_i} \rho \mathbf v \cdot \mathbf # n$ where $\Gamma_i$ is the part of the boundary with indicator $i$, $\rho$ # is the density as reported by the material model, $\mathbf v$ is the # velocity, and $\mathbf n$ is the outward normal. # # As stated, this postprocessor computes the \textit{outbound} mass flux. If # you are interested in the opposite direction, for example from the core # into the mantle when the domain describes the mantle, then you need to # multiply the result by -1. # # \note{In geodynamics, the term ``mass flux'' is often understood to be the # quantity $\rho \mathbf v$, which is really a mass flux \textit{density}, # i.e., a vector-valued field. In contrast to this, the current # postprocessor only computes the integrated flux over each part of the # boundary.
Consequently, the units of the quantity computed here are # $\frac{kg}{s}$.} # # `material statistics': A postprocessor that computes some statistics about # the material properties. In particular, it computes the volume-averages of # the density and viscosity, and the total mass in the model. Specifically, # it implements the following formulas in each time step: $\left<\rho\right> # = \frac{1}{|\Omega|} \int_\Omega \rho(\mathbf x) \, \text{d}x$, # $\left<\eta\right> = \frac{1}{|\Omega|} \int_\Omega \eta(\mathbf x) \, # \text{d}x$, $M = \int_\Omega \rho(\mathbf x) \, \text{d}x$, where # $|\Omega|$ is the volume of the domain. # # `matrix statistics': A postprocessor that computes some statistics about # the matrices. In particular, it outputs total memory consumption, total # non-zero elements, and non-zero elements per block, for system matrix and # system preconditioner matrix. # # `melt statistics': A postprocessor that computes some statistics about the # melt (volume) fraction. If the material model does not implement a melt # fraction function, the output is set to zero. # # `memory statistics': A postprocessor that computes some statistics about # the memory consumption. In particular, it computes the memory usage of the # system matrix, triangulation, p4est, DoFHandler, current constraints, # solution vector, and peak virtual memory usage, all in MB. It also outputs # the memory usage of the system matrix to the screen. # # `particle count statistics': A postprocessor that computes some statistics # about the particle distribution, if present in this simulation. In # particular, it computes minimal, average and maximal values of particles # per cell in the global domain. # # `particles': A Postprocessor that creates particles that follow the # velocity field of the simulation. The particles can be generated and # propagated in various ways and they can carry a number of constant or # time-varying properties. The postprocessor can write output positions and # properties of all particles at chosen intervals, although this is not # mandatory. It also allows other parts of the code to query the particles # for information. # # `point values': A postprocessor that evaluates the solution (i.e., # velocity, pressure, temperature, and compositional fields along with other # fields that are treated as primary variables) at the end of every time # step or after a user-specified time interval at a given set of points and # then writes this data into the file in the output # directory. The points at which the solution should be evaluated are # specified in the section \texttt{Postprocess/Point values} in the input # file. # # In the output file, data is organized as (i) time, (ii) the 2 or 3 # coordinates of the evaluation points, and (iii) followed by the values of # the solution vector at this point. The time is provided in seconds or, if # the global ``Use years in output instead of seconds'' parameter is set, in # years. In the latter case, the velocity is also converted to meters/year, # instead of meters/second. # # \note{Evaluating the solution of a finite element field at arbitrarily # chosen points is an expensive process. Using this postprocessor will only # be efficient if the number of evaluation points or output times is # relatively small. 
If you need a very large number of evaluation points, # you should consider extracting this information from the visualization # program you use to display the output of the `visualization' # postprocessor.} # # `pressure statistics': A postprocessor that computes some statistics about # the pressure field. # # `rotation statistics': A postprocessor that computes some statistics about # the rotational velocity of the model (i.e. integrated net rotation and # angular momentum). In 2D we assume the model to be a cross-section through # an infinite domain in z direction, with a zero z-velocity. Thus, the # z-axis is the only possible rotation axis and both moment of inertia and # angular momentum are scalar instead of tensor quantities. # # `spherical velocity statistics': A postprocessor that computes radial, # tangential and total RMS velocity. # # `temperature statistics': A postprocessor that computes some statistics # about the temperature field. # # `topography': A postprocessor intended for use with a deforming top # surface. After every step it loops over all the vertices on the top # surface and determines the maximum and minimum topography relative to a # reference datum (initial box height for a box geometry model or initial # radius for a sphere/spherical shell geometry model). If 'Topography.Output # to file' is set to true, also outputs topography into text files named # `topography.NNNNN' in the output directory, where NNNNN is the number of # the time step. # The file format then consists of lines with Euclidean coordinates followed # by the corresponding topography value.Topography is printed/written in # meters. # # `velocity boundary statistics': A postprocessor that computes some # statistics about the velocity along the boundaries. For each boundary # indicator (see your geometry description for which boundary indicators are # used), the min and max velocity magnitude is computed. # # `velocity statistics': A postprocessor that computes some statistics about # the velocity field. # # `visualization': A postprocessor that takes the solution and writes it # into files that can be read by a graphical visualization program. # Additional run time parameters are read from the parameter subsection # 'Visualization'. # # `volume of fluid statistics': A postprocessor that computes some # statistics about the volume-of-fluid fields. set List of postprocessors = visualization, velocity statistics, temperature statistics, heat flux statistics, heat flux densities, depth average, basic statistics, entropy viscosity statistics, material statistics # default: # Whether or not the postprocessors should be executed after each of the # nonlinear iterations done within one time step. As this is mainly an # option for the purposes of debugging, it is not supported when the 'Time # between graphical output' is larger than zero, or when the postprocessor # is not intended to be run more than once per timestep. set Run postprocessors on nonlinear iterations = false subsection Command # Command to execute. set Command = # Whether to run command from all processes (true), or only on process 0 # (false). set Run on all processes = false # Select whether \aspect{} should terminate if the command returns a # non-zero exit status. set Terminate on failure = false end subsection Depth average # A comma separated list which specifies which quantities to average in # each depth slice. 
It defaults to averaging all available quantities, but # this can be an expensive operation, so you may want to select only a # few. # # List of options: # all|temperature|composition|adiabatic temperature|adiabatic # pressure|adiabatic density|adiabatic density derivative|velocity # magnitude|sinking velocity|Vs|Vp|viscosity|vertical heat flux|vertical # mass flux set List of output variables = all # The number of zones in depth direction within which we are to compute # averages. By default, we subdivide the entire domain into 10 depth zones # and compute temperature and other averages within each of these zones. # However, if you have a very coarse mesh, it may not make much sense to # subdivide the domain into so many zones and you may wish to choose fewer # than this default. It may also make computations slightly faster. On the # other hand, if you have an extremely highly resolved mesh, choosing more # zones might also make sense. set Number of zones = 96 # default: 10 # A list of formats in which the output shall be produced. The format in # which the output is generated also determines the extension of the file # into which data is written. The list of possible output formats that can # be given here is documented in the appendix of the manual where the # current parameter is described. By default the output is written as a # gnuplot file (for plotting), and as a simple text file. set Output format = txt # default: gnuplot, txt # The time interval between each generation of graphical output files. A # value of zero indicates that output should be generated in each time # step. Units: years if the 'Use years in output instead of seconds' # parameter is set; seconds otherwise. set Time between graphical output = 1000.0 # default: 1e8 end subsection Dynamic core statistics # Output only the excess entropy instead of each of the entropy terms. set Excess entropy only = false end subsection Dynamic topography # Dynamic topography is calculated as the excess or lack of mass that is # supported by mantle flow. This value depends on the density of material # that is moved up or down, i.e. crustal rock, and the density of the # material that is displaced (generally water or air). While the density # of crustal rock is part of the material model, this parameter `Density # above' allows the user to specify the density value of material that is # displaced above the solid surface. By default this material is assumed # to be air, with a density of 0. Units: $kg/m^3$. set Density above = 0. # Dynamic topography is calculated as the excess or lack of mass that is # supported by mantle flow. This value depends on the density of material # that is moved up or down, i.e. mantle above CMB, and the density of the # material that is displaced (generally outer core material). While the # density of mantle rock is part of the material model, this parameter # `Density below' allows the user to specify the density value of material # that is displaced below the solid surface. By default this material is # assumed to be outer core material with a density of 9900. Units: # $kg/m^3$. set Density below = 9900. # Whether to output a file containing the bottom (i.e., CMB) dynamic # topography. set Output bottom = true # Whether to output a file containing the surface dynamic topography. set Output surface = true end subsection Geoid # Option to also output the free-air gravity anomaly up to the maximum # degree. The unit of the output is in SI, hence $m/s^2$ ($1\,\text{mgal} = 10^{-5}\,m/s^2$). # The default is false.
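# As a quick, purely illustrative sanity check on these units (the number 50 is invented for the example): an anomaly of 50 mgal corresponds to $50 \times 10^{-5}\,m/s^2 = 5\times 10^{-4}\,m/s^2$ in the output.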
set Also output the gravity anomaly = false # Option to also output the spherical harmonic coefficients of the CMB # dynamic topography contribution to the maximum degree. The default is # false. set Also output the spherical harmonic coefficients of CMB dynamic topography contribution = false # Option to also output the spherical harmonic coefficients of the density # anomaly contribution to the maximum degree. The default is false. set Also output the spherical harmonic coefficients of density anomaly contribution = false # Option to also output the spherical harmonic coefficients of the geoid # anomaly up to the maximum degree. The default is false, so the postprocessor # will only output the geoid anomaly in grid format. set Also output the spherical harmonic coefficients of geoid anomaly = false # Option to also output the spherical harmonic coefficients of the surface # dynamic topography contribution to the maximum degree. The default is # false. set Also output the spherical harmonic coefficients of surface dynamic topography contribution = false # The density value above the surface boundary. set Density above = 0. # The density value below the CMB boundary. set Density below = 9900. # Option to include the contribution from dynamic topography on the geoid. The # default is true. set Include the contributon from dynamic topography = true # This parameter can be any positive integer. However, the value # normally should not exceed the maximum degree of the initial perturbed # temperature field. For example, if the initial temperature uses S40RTS, # the maximum degree should not be larger than 40. set Maximum degree = 20 # This parameter normally is set to 2 since the perturbed gravitational # potential at degree 1 always vanishes in a reference frame in which the # planetary center of mass coincides with the center of figure. set Minimum degree = 2 # Option to output the geoid anomaly in geographical coordinates (latitude # and longitude). The default is false, so the postprocessor will output the # data in geocentric coordinates (x,y,z) as normal. set Output data in geographical coordinates = false end subsection Global statistics # Whether to put every nonlinear iteration into a separate line in the # statistics file (if true), or to output only one line per time step that # contains the total number of iterations of the Stokes and advection # linear system solver. set Write statistics for each nonlinear iteration = false end subsection Gravity calculation # Parameter for the list of points sampling scheme: List of satellite # latitude coordinates. set List of latitude = # Parameter for the list of points sampling scheme: List of satellite # longitude coordinates. set List of longitude = # Parameter for the list of points sampling scheme: List of satellite # radius coordinates. Just specify one radius if all points have # the same radius. If not, make sure there are as many radius values as # longitude and latitude values. set List of radius = # Parameter for the uniform distribution sampling scheme: Gravity may be # calculated for a set of points along the latitude between a minimum and # maximum latitude. set Maximum latitude = 90 # Parameter for the uniform distribution sampling scheme: Gravity may be # calculated for a set of points along the longitude between a minimum # and maximum longitude. set Maximum longitude = 180. # Parameter for the map sampling scheme: Maximum radius can be defined in # or outside the model. set Maximum radius = 0.
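# As a purely illustrative sketch of the map sampling scheme (the numerical values below are invented for this example and would need to match your geometry), a coarse $5\times 5$ gravity map sampled on a single radius could be requested along the following lines:
#   set Sampling scheme         = map
#   set Minimum radius          = 6621e3
#   set Maximum radius          = 6621e3
#   set Number points longitude = 5
#   set Number points latitude  = 5
# together with the minimum/maximum latitude and longitude bounds documented in this subsection.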
# Parameter for the uniform distribution sampling scheme: Gravity may be # calculated for a set of points along the latitude between a minimum and # maximum latitude. set Minimum latitude = -90. # Parameter for the uniform distribution sampling scheme: Gravity may be # calculated for a set of points along the longitude between a minimum # and maximum longitude. set Minimum longitude = -180. # Parameter for the map sampling scheme: Minimum radius may be defined in # or outside the model. Prescribe a minimum radius for a sampling coverage # at a specific height. set Minimum radius = 0. # Parameter for the fibonacci spiral sampling scheme: This specifies the # desired number of satellites per radius layer. The default value is 200. # Note that sampling becomes more uniform with an increasing number of # satellites. set Number points fibonacci spiral = 200 # Parameter for the map sampling scheme: This specifies the number of # points along the latitude (e.g. gravity map) between a minimum and # maximum latitude. set Number points latitude = 1 # Parameter for the map sampling scheme: This specifies the number of # points along the longitude (e.g. gravity map) between a minimum and # maximum longitude. set Number points longitude = 1 # Parameter for the map sampling scheme: This specifies the number of # points along the radius (e.g. depth profile) between a minimum and # maximum radius. set Number points radius = 1 # Set the precision of gravity acceleration, potential and gradients in # the gravity output and statistics file. set Precision in gravity output = 12 # A quadrature degree increase over the velocity element degree may be # required when gravity is calculated near the surface or inside the # model. An increase in the quadrature degree adds accuracy to the # gravity solution by reducing the noise due to the model grid. set Quadrature degree increase = 0 # Gravity anomalies may be computed using density anomalies relative to a # reference density. set Reference density = 3300. # Choose the sampling scheme. By default, the map produces a grid of # equally angled points between a minimum and maximum radius, longitude, # and latitude. A list of points contains the specific coordinates of the # satellites. The fibonacci spiral sampling scheme produces a uniformly # distributed map on the surface of a sphere defined by a minimum and/or # maximum radius. set Sampling scheme = map # The time interval between each generation of gravity output files. A # value of 0 indicates that output should be generated in each time step. # Units: years if the 'Use years in output instead of seconds' parameter # is set; seconds otherwise. set Time between gravity output = 1e8 # The maximum number of time steps between each generation of gravity # output files. set Time steps between gravity output = 2147483647 end subsection Memory statistics # If set to 'true', also output the peak virtual memory usage (computed as # the maximum over all processors). set Output peak virtual memory (VmPeak) = true end subsection Particles # By default, every cell needs to contain particles to use this # interpolator plugin. If this parameter is set to true, cells are allowed # to have no particles, in which case the interpolator will return 0 for # the cell's properties. set Allow cells without particles = false # A comma separated list of file formats to be used for graphical output. # The list of possible output formats that can be given here is documented # in the appendix of the manual where the current parameter is described.
set Data output format = vtu # A comma separated list of strings which exclude all particle property # fields which contain these strings. If one of the entries is 'all', only # an id will be provided for every point. set Exclude output properties = # This parameter is used to decide which method to use to solve the # equation that describes the position of particles, i.e., # $\frac{d}{dt}\mathbf x_k(t) = \mathbf u(\mathbf x_k(t),t)$, where $k$ is # an index that runs over all particles, and $\mathbf u(\mathbf x,t)$ is # the velocity field that results from the Stokes equations. # # In practice, the exact velocity $\mathbf u(\mathbf x,t)$ is of course # not available, but only a numerical approximation $\mathbf u_h(\mathbf # x,t)$. Furthermore, this approximation is only available at discrete # time steps, $\mathbf u^n(\mathbf x)=\mathbf u(\mathbf x,t^n)$, and these # need to be interpolated between time steps if the integrator for the # equation above requires an evaluation at time points between the # discrete time steps. If we denote this interpolation in time by # $\tilde{\mathbf u}_h(\mathbf x,t)$ where $\tilde{\mathbf u}_h(\mathbf # x,t^n)=\mathbf u^n(\mathbf x)$, then the equation the differential # equation solver really tries to solve is $\frac{d}{dt}\tilde{\mathbf # x}_k(t) = \tilde{\mathbf u}_h(\mathbf x_k(t),t)$. # # As a consequence of these considerations, if you try to assess # convergence properties of an ODE integrator -- for example to verify # that the RK4 integrator converges with fourth order --, it is important # to recall that the integrator may not solve the equation you think it # solves. If, for example, we call the numerical solution of the ODE # $\tilde{\mathbf x}_{k,h}(t)$, then the error will typically satisfy a # relationship like $$ \| \tilde{\mathbf x}_k(T) - \tilde{\mathbf # x}_{k,h}(T) \| \le C(T) \Delta t^p$$ where $\Delta t$ is the time step # and $p$ the convergence order of the method, and $C(T)$ is a (generally # unknown) constant that depends on the end time $T$ at which one compares # the solutions. On the other hand, an analytically computed trajectory # would likely use the \textit{exact} velocity, and one may be tempted to # compute $\| \mathbf x_k(T) - \tilde{\mathbf x}_{k,h}(T) \|$, but this # quantity will, in the best case, only satisfy an estimate of the form $$ # \| \mathbf x_k(T) - \tilde{\mathbf x}_{k,h}(T) \| \le C_1(T) \Delta # t^p + C_2(T) \| \mathbf u-\mathbf u_h \| + C_3(T) \| \mathbf # u_h-\tilde{\mathbf u}_h \|$$ with appropriately chosen norms for the # second and third term. These second and third terms typically converge # to zero at relatively low rates (compared to the order $p$ of the # integrator, which can often be chosen relatively high) in the mesh size # $h$ and the time step size $\Delta t$, limiting the overall accuracy of # the ODE integrator. # # Select one of the following models: # # `euler': Explicit Euler scheme integrator, where $y_{n+1} = y_n + \Delta # t \, v(y_n)$. This requires only one integration substep per timestep. # # `rk2': Second Order Runge Kutta integrator $y_{n+1} = y_n + \Delta t\, # v(t_{n+1/2}, y_{n} + \frac{1}{2} k_1)$ where $k_1 = \Delta t\, v(t_{n}, # y_{n})$ # # `rk4': Runge Kutta fourth order integrator, where $y_{n+1} = y_n + # \frac{1}{6} k_1 + \frac{1}{3} k_2 + \frac{1}{3} k_3 + \frac{1}{6} k_4$ # and $k_1$, $k_2$, $k_3$, $k_4$ are defined as usual.
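# For example, to select the fourth-order scheme described above one would write `set Integration scheme = rk4' instead; the value chosen below keeps the second-order `rk2' scheme.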
set Integration scheme = rk2 # Select one of the following models: # # `bilinear least squares': Interpolates particle properties onto a vector # of points using a bilinear least squares method. Currently only 2D # models are supported. Note that deal.II must be configured with # BLAS/LAPACK. # # `cell average': Return the average of all particle properties in the # given cell. # # `harmonic average': Return the harmonic average of all particle # properties in the given cell. If the cell contains no particles, return # the harmonic average of the properties in the neighboring cells. # # `nearest neighbor': Return the properties of the nearest neighboring # particle in the current cell, or nearest particle in nearest neighboring # cell if current cell is empty. set Interpolation scheme = cell average # A comma separated list of particle properties that should be tracked. By # default none is selected, which means only position, velocity and id of # the particles are output. # # The following properties are available: # # `composition': Implementation of a plugin in which the particle property # is defined by the compositional fields in the model. This can be used to # track solid compositionevolution over time. # # `function': Implementation of a model in which the particle property is # set by evaluating an explicit function at the initial position of each # particle. The function is defined in the parameters in section # ``Particles|Function''. The format of these functions follows the syntax # understood by the muparser library, see # Section~\ref{sec:muparser-format}. # # `initial composition': Implementation of a plugin in which the particle # property is given as the initial composition at the particle's initial # position. The particle gets as many properties as there are # compositional fields. # # `initial position': Implementation of a plugin in which the particle # property is given as the initial position of the particle. This property # is vector-valued with as many components as there are space dimensions. # In practice, it is often most useful to only visualize one of the # components of this vector, or the magnitude of the vector. For example, # in a spherical mantle simulation, the magnitude of this property equals # the starting radius of a particle, and is thereby indicative of which # part of the mantle a particle comes from. # # `integrated strain': A plugin in which the particle property tensor is # defined as the deformation gradient tensor $\mathbf F$ this particle has # experienced. $\mathbf F$ can be polar-decomposed into the left # stretching tensor $\mathbf L$ (the finite strain we are interested in), # and the rotation tensor $\mathbf Q$. See the corresponding cookbook in # the manual for more detailed information. # # `integrated strain invariant': A plugin in which the particle property # is defined as the finite strain invariant ($\varepsilon_{ii}$). This # property is calculated with the timestep ($dt$) and the second invariant # of the deviatoric strain rate tensor ($\dot{\varepsilon}_{ii}$), where # the value at time step $n$ is $\varepsilon_{ii}^{n} = # \varepsilon_{ii}^{n-1} + dt\dot{\varepsilon}_{ii}$. # # `melt particle': Implementation of a plugin in which the particle # property is defined as presence of melt above a threshold, which can be # set as an input parameter. This property is set to 0 if melt is not # present and set to 1 if melt is present. 
# # `pT path': Implementation of a plugin in which the particle property is # defined as the current pressure and temperature at this position. This # can be used to generate pressure-temperature paths of material points # over time. # # `position': Implementation of a plugin in which the particle property is # defined as the current position. # # `velocity': Implementation of a plugin in which the particle property is # defined as the recent velocity at this position. # # `viscoplastic strain invariants': A plugin that calculates the finite # strain invariant a particle has experienced and assigns it to either the # plastic and/or viscous strain field based on whether the material is # plastically yielding, or the total strain field used in the visco # plastic material model. The implementation of this property is # equivalent to the implementation for compositional fields that is # located in the plugin in # \texttt{benchmarks/buiter\_et\_al\_2008\_jgr/plugin/},and is effectively # the same as what the visco plastic material model uses for compositional # fields. set List of particle properties = # Strategy that is used to balance the computational load across # processors for adaptive meshes. set Load balancing strategy = repartition # Upper limit for particle number per cell. This limit is useful for # adaptive meshes to prevent coarse cells from slowing down the whole # model. It will be checked and enforced after mesh refinement, after MPI # transfer of particles and after particle movement. If there are # \texttt{n\_number\_of\_particles} $>$ \texttt{max\_particles\_per\_cell} # particles in one cell then \texttt{n\_number\_of\_particles} - # \texttt{max\_particles\_per\_cell} particles in this cell are randomly # chosen and destroyed. set Maximum particles per cell = 100 # Lower limit for particle number per cell. This limit is useful for # adaptive meshes to prevent fine cells from being empty of particles. It # will be checked and enforced after mesh refinement and after particle # movement. If there are \texttt{n\_number\_of\_particles} $<$ # \texttt{min\_particles\_per\_cell} particles in one cell then # \texttt{min\_particles\_per\_cell} - \texttt{n\_number\_of\_particles} # particles are generated and randomly placed in this cell. If the # particles carry properties the individual property plugins control how # the properties of the new particles are initialized. set Minimum particles per cell = 0 # VTU file output supports grouping files from several CPUs into a given # number of files using MPI I/O when writing on a parallel filesystem. # Select 0 for no grouping. This will disable parallel file output and # instead write one file per processor. A value of 1 will generate one big # file containing the whole solution, while a larger value will create # that many files (at most as many as there are MPI ranks). set Number of grouped files = 16 # Total number of particles to create (not per processor or per element). # The number is parsed as a floating point number (so that one can # specify, for example, '1e4' particles) but it is interpreted as an # integer, of course. set Number of particles = 1000 # Select one of the following models: # # `ascii file': Generates a distribution of particles from coordinates # specified in an Ascii data file. The file format is a simple text file, # with as many columns as spatial dimensions and as many lines as # particles to be generated. Initial comment lines starting with `#' will # be discarded. 
Note that this plugin always generates as many particles # as there are coordinates in the data file, the # ``Postprocess/Particles/Number of particles'' parameter has no effect on # this plugin. All of the values that define this generator are read from # a section ``Postprocess/Particles/Generator/Ascii file'' in the input # file, see # Section~\ref{parameters:Postprocess/Particles/Generator/Ascii_20file}. # # `probability density function': Generate a random distribution of # particles over the entire simulation domain. The probability density is # prescribed in the form of a user-prescribed function. The format of this # function follows the syntax understood by the muparser library, see # Section~\ref{sec:muparser-format}. The return value of the function is # always checked to be a non-negative probability density but it can be # zero in parts of the domain. # # `quadrature points': Generates particles at the quadrature points of # each active cell of the triangulation. Here, Gauss quadrature of degree # (velocity\_degree + 1), is used similarly to the assembly of Stokes # matrix. # # `random uniform': Generates a random uniform distribution of particles # over the entire simulation domain. # # `reference cell': Generates a uniform distribution of particles per cell # and spatial direction in the unit cell and transforms each of the # particles back to real region in the model domain. Uniform here means # the particles will be generated with an equal spacing in each spatial # dimension # # `uniform box': Generate a uniform distribution of particles over a # rectangular domain in 2D or 3D. Uniform here means the particles will be # generated with an equal spacing in each spatial dimension. Note that in # order to produce a regular distribution the number of generated # particles might not exactly match the one specified in the input file. # # `uniform radial': Generate a uniform distribution of particles over a # spherical domain in 2D or 3D. Uniform here means the particles will be # generated with an equal spacing in each spherical spatial dimension, # i.e., the particles are created at positions that increase linearly with # equal spacing in radius, colatitude and longitude around a certain # center point. Note that in order to produce a regular distribution the # number of generated particles might not exactly match the one specified # in the input file. set Particle generator name = random uniform # Weight that is associated with the computational load of a single # particle. The sum of particle weights will be added to the sum of cell # weights to determine the partitioning of the mesh if the `repartition' # particle load balancing strategy is selected. The optimal weight depends # on the used integrator and particle properties. In general for a more # expensive integrator and more expensive properties a larger particle # weight is recommended. Before adding the weights of particles, each cell # already carries a weight of 1000 to account for the cost of field-based # computations. set Particle weight = 10 # On large clusters it can be advantageous to first write the output to a # temporary file on a local file system and later move this file to a # network file system. If this variable is set to a non-empty string it # will be interpreted as a temporary storage location. set Temporary output location = # The time interval between each generation of output files. A value of # zero indicates that output should be generated every time step. 
# # Units: years if the 'Use years in output instead of seconds' parameter # is set; seconds otherwise. set Time between data output = 1e8 # Some particle interpolation algorithms require knowledge about particles # in neighboring cells. To allow this, particles in ghost cells need to be # exchanged between the processes neighboring this cell. This parameter # determines whether this transport is happening. set Update ghost particles = false # File operations can potentially take a long time, blocking the progress # of the rest of the model run. Setting this variable to `true' moves this # process into a background thread, while the rest of the model # continues. set Write in background thread = false subsection Function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric # value everywhere the constant appears. These values can be defined # using this parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or # multiplication, as well as all of the common functions such as `sin' # or `cos'. In addition, it may contain expressions like `if(x>0, 1, # -1)' where the expression evaluates to the second argument if the # first argument is true, and to the third argument otherwise. For a # full overview of possible expressions accepted see the documentation # of the muparser library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The number of function components where each component is described by # a function expression delimited by a ';'. set Number of components = 1 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' # (in 3d) for spatial coordinates and `t' for time. You can then use # these variable names in your function expression and they will be # replaced by the values of these variables at which the function is # currently evaluated. However, you can also choose a different set of # names for the independent variables at which to evaluate your function # expression. For example, if you work in spherical coordinates, you may # wish to set this input parameter to `r,phi,theta,t' and then use these # variable names in your function expression. set Variable names = x,y,t end subsection Generator subsection Ascii file # The name of a directory that contains the particle data. This path # may either be absolute (if starting with a '/') or relative to the # current directory. The path may also include the special text # '$ASPECT_SOURCE_DIR' which will be interpreted as the path in which # the ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in # the `data/' subdirectory of ASPECT. 
set Data directory = $ASPECT_SOURCE_DIR/data/particle/generator/ascii/ # The name of the particle file. set Data file name = particle.dat end subsection Probability density function # Sometimes it is convenient to use symbolic constants in the # expression that describes the function, rather than having to use # its numeric value everywhere the constant appears. These values can # be defined using this parameter, in the form `var1=value1, # var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines # both `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or # multiplication, as well as all of the common functions such as `sin' # or `cos'. In addition, it may contain expressions like `if(x>0, 1, # -1)' where the expression evaluates to the second argument if the # first argument is true, and to the third argument otherwise. For a # full overview of possible expressions accepted see the documentation # of the muparser library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued # function with multiple components, then separate the expressions for # individual components by a semicolon. set Function expression = 0 # If true, particle numbers per cell are calculated randomly according # to their respective probability density. This means particle numbers # per cell can deviate statistically from the integral of the # probability density. If false, first determine how many particles # each cell should have based on the integral of the density over each # of the cells, and then once we know how many particles we want on # each cell, choose their locations randomly within each cell. set Random cell selection = true # The seed for the random number generator that controls the particle # generation. Keep constant to generate identical particle # distributions in subsequent model runs. Change to get a different # distribution. In parallel computations the seed is further modified # on each process to ensure different particle patterns on different # processes. Note that the number of particles per processor is not # affected by the seed. set Random number seed = 5432 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' # (in 3d) for spatial coordinates and `t' for time. You can then use # these variable names in your function expression and they will be # replaced by the values of these variables at which the function is # currently evaluated. However, you can also choose a different set of # names for the independent variables at which to evaluate your # function expression. For example, if you work in spherical # coordinates, you may wish to set this input parameter to # `r,phi,theta,t' and then use these variable names in your function # expression. set Variable names = x,y,t end subsection Reference cell # List of number of particles to create per cell and spatial # dimension. The size of the list is the number of spatial dimensions. # If only one value is given, then each spatial dimension is set to # the same value. 
The list of numbers are parsed as a floating point # number (so that one can specify, for example, '1e4' particles) but # it is interpreted as an integer, of course. set Number of particles per cell per direction = 2 end subsection Uniform box # Maximum x coordinate for the region of particles. set Maximum x = 1. # Maximum y coordinate for the region of particles. set Maximum y = 1. # Maximum z coordinate for the region of particles. set Maximum z = 1. # Minimum x coordinate for the region of particles. set Minimum x = 0. # Minimum y coordinate for the region of particles. set Minimum y = 0. # Minimum z coordinate for the region of particles. set Minimum z = 0. end subsection Uniform radial # x coordinate for the center of the spherical region, where particles # are generated. set Center x = 0. # y coordinate for the center of the spherical region, where particles # are generated. set Center y = 0. # z coordinate for the center of the spherical region, where particles # are generated. set Center z = 0. # Maximum latitude coordinate for the region of particles in degrees. # Measured from the center position, and from the north pole. set Maximum latitude = 180. # Maximum longitude coordinate for the region of particles in degrees. # Measured from the center position. set Maximum longitude = 360. # Maximum radial coordinate for the region of particles. Measured from # the center position. set Maximum radius = 1. # Minimum latitude coordinate for the region of particles in degrees. # Measured from the center position, and from the north pole. set Minimum latitude = 0. # Minimum longitude coordinate for the region of particles in degrees. # Measured from the center position. set Minimum longitude = 0. # Minimum radial coordinate for the region of particles. Measured from # the center position. set Minimum radius = 0. # The number of radial shells of particles that will be generated # around the central point. set Radial layers = 1 end end subsection Interpolator subsection Bilinear least squares # The maximum global particle property values that will be used as a # limiter for the bilinear least squares interpolation. The number of # the input 'Global particle property maximum' values separated by ',' # has to be the same as the number of particle properties. set Global particle property maximum = 1.7976931348623157e+308 # The minimum global particle property that will be used as a limiter # for the bilinear least squares interpolation. The number of the # input 'Global particle property minimum' values separated by ',' has # to be the same as the number of particle properties. set Global particle property minimum = -1.7976931348623157e+308 # Whether to apply a global particle property limiting scheme to the # interpolated particle properties. set Use limiter = false end end subsection Melt particle # The minimum porosity that has to be present at the position of a # particle for it to be considered a melt particle (in the sense that # the melt presence property is set to 1). set Threshold for melt presence = 1e-3 end end subsection Point values # The list of points at which the solution should be evaluated. Points # need to be separated by semicolons, and coordinates of each point need # to be separated by commas. set Evaluation points = # The time interval between each generation of point values output. A # value of zero indicates that output should be generated in each time # step. Units: years if the 'Use years in output instead of seconds' # parameter is set; seconds otherwise. 
set Time between point values output = 0. # Whether or not the Evaluation points are specified in the natural # coordinates of the geometry model, e.g. radius, lon, lat for the chunk # model. Currently, natural coordinates for the spherical shell and sphere # geometries are not supported. set Use natural coordinates = false end subsection Rotation statistics # Whether to write the full moment of inertia tensor into the statistics # output instead of its norm for the current rotation axis. This is a # second-order symmetric tensor with 6 components in 3D. In 2D this option # has no effect, because the rotation axis is fixed and thus the moment of # inertia is always a scalar. set Output full moment of inertia tensor = false # Whether to use a constant density of one for the computation of the # angular momentum and moment of inertia. This is an approximation that # assumes that the 'volumetric' rotation is equal to the 'mass' rotation. # If this parameter is true this postprocessor computes 'net rotation' # instead of 'angular momentum'. set Use constant density of one = false end subsection Topography # Whether or not to write topography to a text file named named # 'topography.NNNNN' in the output directory set Output to file = false # The time interval between each generation of text output files. A value # of zero indicates that output should be generated in each time step. # Units: years if the 'Use years in output instead of seconds' parameter # is set; seconds otherwise. set Time between text output = 0. end subsection Visualization # deal.II offers the possibility to filter duplicate vertices for HDF5 # output files. This merges the vertices of adjacent cells and therefore # saves disk space, but misrepresents discontinuous output properties. # Activating this function reduces the disk space by about a factor of # $2^{dim}$ for HDF5 output, and currently has no effect on other output # formats. \note{\textbf{Warning:} Setting this flag to true will result # in visualization output that does not accurately represent discontinuous # fields. This may be because you are using a discontinuous finite element # for the pressure, temperature, or compositional variables, or because # you use a visualization postprocessor that outputs quantities as # discontinuous fields (e.g., the strain rate, viscosity, etc.). These # will then all be visualized as \textit{continuous} quantities even # though, internally, \aspect{} considers them as discontinuous fields.} set Filter output = false # deal.II offers the possibility to linearly interpolate output fields of # higher order elements to a finer resolution. This somewhat compensates # the fact that most visualization software only offers linear # interpolation between grid points and therefore the output file is a # very coarse representation of the actual solution field. Activating this # option increases the spatial resolution in each dimension by a factor # equal to the polynomial degree used for the velocity finite element # (usually 2). In other words, instead of showing one quadrilateral or # hexahedron in the visualization per cell on which \aspect{} computes, it # shows multiple (for quadratic elements, it will describe each cell of # the mesh on which we compute as $2\times 2$ or $2\times 2\times 2$ cells # in 2d and 3d, respectively; correspondingly more subdivisions are used # if you use cubic, quartic, or even higher order elements for the # velocity). 
# # The effect of using this option can be seen in the following picture # showing a variation of the output produced with the input files from # Section~\ref{sec:shell-simple-2d}: # # \begin{center} # \includegraphics[width=0.5\textwidth]{viz/parameters/build-patches}\end{center}Here, # the left picture shows one visualization cell per computational cell # (i.e., the option is switched off), and the right picture shows the same # simulation with the option switched on (which is the default). The # images show the same data, demonstrating that interpolating the solution # onto bilinear shape functions as is commonly done in visualizing data # loses information. # # Of course, activating this option also greatly increases the amount of # data \aspect{} will write to disk: approximately by a factor of 4 in 2d, # and a factor of 8 in 3d, when using quadratic elements for the velocity, # and correspondingly more for even higher order elements. set Interpolate output = true # A comma separated list of visualization objects that should be run # whenever writing graphical output. By default, the graphical output # files will always contain the primary variables velocity, pressure, and # temperature. However, one frequently wants to also visualize derived # quantities, such as the thermodynamic phase that corresponds to a given # temperature-pressure value, or the corresponding seismic wave speeds. # The visualization objects do exactly this: they compute such derived # quantities and place them into the output file. The current parameter is # the place where you decide which of these additional output variables # you want to have in your output file. # # The following postprocessors are available: # # `ISA rotation timescale': A visualization output object that generates # output showing the timescale for the rotation of grains toward the # infinite strain axis. Kaminski and Ribe (2002, Gcubed) call this # quantity $\tau_{ISA}$ and define it as $\tau_{ISA} \approx # \frac{1}{\dot{\epsilon}}$ where $\dot{\epsilon}$ is the largest # eigenvalue of the strain rate tensor. It can be used, along with the # grain lag angle $\Theta$, to calculate the grain orientation lag # parameter. # # `Vp anomaly': A visualization output object that generates output # showing the percentage anomaly in the seismic compressional wave speed # $V_p$ as a spatially variable function with one value per cell. This # anomaly is either shown as a percentage anomaly relative to the # reference profile given by adiabatic conditions (with the compositions # given by the current composition, such that the reference could # potentially change through time), or as a percentage change relative to # the laterally averaged velocity at the depth of the cell. This velocity # is calculated by linear interpolation between average values calculated # within equally thick depth slices. The number of depth slices in the # domain is user-defined. Typically, the best results will be obtained if # the number of depth slices is balanced between being large enough to # capture step changes in velocities, but small enough to maintain a # reasonable number of evaluation points per slice. Bear in mind that # lateral averaging subsamples the finite element mesh. Note that this # plugin requires a material model that provides seismic velocities. # # `Vs anomaly': A visualization output object that generates output # showing the percentage anomaly in the seismic shear wave speed $V_s$ as # a spatially variable function with one value per cell. 
This anomaly is either shown as a percentage anomaly relative to the
# reference profile given by adiabatic conditions (with the compositions
# given by the current composition, such that the reference could
# potentially change through time), or as a percentage change relative to
# the laterally averaged velocity at the depth of the cell. This velocity
# is calculated by linear interpolation between average values calculated
# within equally thick depth slices. The number of depth slices in the
# domain is user-defined. Typically, the best results will be obtained if
# the number of depth slices is balanced between being large enough to
# capture step changes in velocities, but small enough to maintain a
# reasonable number of evaluation points per slice. Bear in mind that
# lateral averaging subsamples the finite element mesh. Note that this
# plugin requires a material model that provides seismic velocities.
#
# `adiabat': A visualization output object that generates adiabatic
# temperature, pressure, density, and density derivative as produced by
# AdiabaticConditions.
#
# `artificial viscosity': A visualization output object that generates
# output showing the value of the artificial viscosity on each cell.
#
# `artificial viscosity composition': A visualization output object that
# generates output showing the value of the artificial viscosity for a
# compositional field on each cell.
#
# `boundary indicators': A visualization output object that generates
# output about the used boundary indicators. In a loop over the active
# cells, if a cell lies at a domain boundary, the boundary indicator of
# the face along the boundary is requested. In case the cell does not lie
# along any domain boundary, the cell is assigned the value of the largest
# used boundary indicator plus one. When a cell is situated in one of the
# corners of the domain, multiple faces will have a boundary indicator.
# This postprocessor returns the value of the first face along a boundary
# that is encountered in a loop over all the faces.
#
# `compositional vector': A visualization output object that outputs
# vectors whose components are derived from compositional fields. Input
# parameters for this postprocessor are defined in section
# Postprocess/Visualization/Compositional fields as vectors.
#
# `depth': A visualization output postprocessor that outputs the depth for
# all points inside the domain, as determined by the geometry model.
#
# `dynamic topography': A visualization output object that generates
# output for the dynamic topography at the top and bottom of the model
# space. The approach to determine the dynamic topography requires us to
# compute the stress tensor and evaluate the component of it in the
# direction in which gravity acts. In other words, we compute
# $\sigma_{rr}={\hat g}^T(2 \eta \varepsilon(\mathbf u)-\frac 13
# (\textrm{div}\;\mathbf u)I)\hat g - p_d$ where
# $\hat g = \mathbf g/\|\mathbf g\|$ is the direction of the gravity
# vector $\mathbf g$ and $p_d=p-p_a$ is the dynamic pressure computed by
# subtracting the adiabatic pressure $p_a$ from the total pressure $p$
# computed as part of the Stokes solve. From this, the dynamic topography
# is computed using the formula
# $h=\frac{\sigma_{rr}}{(\mathbf g \cdot \mathbf n) \rho}$ where $\rho$ is
# the density at the cell center. For the bottom surface we choose the
# convention that positive values are up (out) and negative values are in
# (down), analogous to the deformation of the upper surface.
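# As a purely illustrative example of the magnitudes involved (the numbers
# below are made up and not taken from any particular model): for
# $|\sigma_{rr}|=10\,$MPa, $|\mathbf g \cdot \mathbf n|\approx
# 9.81\,$m/s$^2$ and $\rho=3300\,$kg/m$^3$, the formula above yields a
# topography of magnitude
# $|h| \approx 10^7/(9.81\cdot 3300) \approx 310\,$m.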
# Note that this implementation takes the direction of gravity into
# account, which means that reversing the flow in backward advection
# calculations will not reverse the instantaneous topography because the
# reverse flow will be divided by the reverse surface gravity.
#
# Strictly speaking, the dynamic topography is of course a quantity that
# is only of interest at the surface. However, we compute it everywhere to
# make things fit into the framework within which we produce data for
# visualization. You probably only want to visualize whatever data this
# postprocessor generates at the surface of your domain and simply ignore
# the rest of the data generated.
#
# `error indicator': A visualization output object that generates output
# showing the estimated error or other mesh refinement indicator as a
# spatially variable function with one value per cell.
#
# `geoid': Visualization for the geoid solution. The geoid is given by the
# equivalent water column height due to a gravity perturbation. Units:
# $\si{m}$.
#
# `grain lag angle': A visualization output object that generates output
# showing the angle between the infinite strain axis and the flow
# velocity. Kaminski and Ribe (2002, Gcubed) call this quantity $\Theta$
# and define it as $\Theta = \cos^{-1}(\hat{u}\cdot\hat{e})$ where
# $\hat{u}=\vec{u}/|\vec{u}|$, $\vec{u}$ is the local flow velocity, and
# $\hat{e}$ is the local infinite strain axis, which we calculate as the
# first eigenvector of the 'left stretch' tensor. $\Theta$ can be used to
# calculate the grain orientation lag parameter.
#
# `gravity': A visualization output object that outputs the gravity
# vector.
#
# `heat flux map': A visualization output object that generates output for
# the heat flux density across the top and bottom boundary in the outward
# direction. The heat flux is computed as the sum of advective heat flux
# and conductive heat flux through Neumann boundaries, both computed as
# integral over the boundary area, and conductive heat flux through
# Dirichlet boundaries, which is computed using the consistent boundary
# flux method as described in ``Gresho, Lee, Sani, Maslanik, Eaton (1987).
# The consistent Galerkin FEM for computing derived boundary quantities in
# thermal and/or fluids problems. International Journal for Numerical
# Methods in Fluids, 7(4), 371-394.'' If only conductive heat flux through
# Dirichlet boundaries is of interest, the postprocessor can produce
# output of higher resolution by evaluating the CBF solution vector
# point-wise instead of computing cell-wise averaged values.
#
# `heating': A visualization output object that generates output for all
# the heating terms used in the energy equation.
#
# `material properties': A visualization output object that generates
# output for the material properties given by the material model. There
# are a number of other visualization postprocessors that offer to write
# individual material properties. However, they all individually have to
# evaluate the material model. This is inefficient if one wants to output
# more than just one or two of the fields provided by the material model.
# The current postprocessor allows outputting a (potentially large) subset
# of all of the information provided by material models at once, with just
# a single material model evaluation per output point.
#
# `maximum horizontal compressive stress': A plugin that computes the
# direction and magnitude of the maximum horizontal component of the
# compressive stress as a vector field.
# The direction of this vector can often be used to visualize the
# principal mode of deformation (e.g., at normal faults or extensional
# margins) and can be correlated with seismic anisotropy. Recall that the
# \textit{compressive} stress is simply the negative stress,
# $\sigma_c=-\sigma=-\left[ 2\eta (\varepsilon(\mathbf u) - \frac 13
# (\nabla \cdot \mathbf u) I) + pI\right]$.
#
# Following \cite{LundTownend07}, we define the maximum horizontal stress
# direction as that \textit{horizontal} direction $\mathbf n$ that
# maximizes $\mathbf n^T \sigma_c \mathbf n$. We call a vector
# \textit{horizontal} if it is perpendicular to the gravity vector
# $\mathbf g$.
#
# In two space dimensions, $\mathbf n$ is simply a vector that is
# horizontal (we choose one of the two possible orientations). This
# direction is then scaled by the size of the horizontal stress in this
# direction, i.e., the plugin outputs the vector
# $\mathbf w = (\mathbf n^T \sigma_c \mathbf n) \; \mathbf n$.
#
# In three space dimensions, given two horizontal, perpendicular, unit
# length, but otherwise arbitrarily chosen vectors $\mathbf u,\mathbf v$,
# we can express $\mathbf n = (\cos \alpha)\mathbf u + (\sin\alpha)\mathbf
# v$ where $\alpha$ maximizes the expression \begin{align*} f(\alpha) =
# \mathbf n^T \sigma_c \mathbf n = (\mathbf u^T \sigma_c \mathbf
# u)(\cos\alpha)^2 +2(\mathbf u^T \sigma_c \mathbf
# v)(\cos\alpha)(\sin\alpha) +(\mathbf v^T \sigma_c \mathbf
# v)(\sin\alpha)^2.\end{align*}
#
# The maximum of $f(\alpha)$ is attained where $f'(\alpha)=0$. Evaluating
# the derivative and using trigonometric identities, one finds that
# $\alpha$ has to satisfy the equation \begin{align*} \tan(2\alpha) =
# \frac{2\mathbf u^T \sigma_c \mathbf v}
# {\mathbf u^T \sigma_c \mathbf u - \mathbf v^T
# \sigma_c \mathbf v}.\end{align*} Since the transform
# $\alpha\mapsto\alpha+\pi$ flips the direction of $\mathbf n$, we only
# need to seek a solution to this equation in the interval
# $\alpha\in[0,\pi)$. These are given by $\alpha_1=\frac 12 \arctan
# \frac{2\mathbf u^T \sigma_c \mathbf v}{\mathbf u^T \sigma_c \mathbf u -
# \mathbf v^T \sigma_c \mathbf v}$ and $\alpha_2=\alpha_1+\frac{\pi}{2}$,
# one of which will correspond to a minimum and the other to a maximum of
# $f(\alpha)$. One checks the sign of $f''(\alpha)=-2(\mathbf u^T \sigma_c
# \mathbf u - \mathbf v^T \sigma_c \mathbf v)\cos(2\alpha) - 4 (\mathbf
# u^T \sigma_c \mathbf v) \sin(2\alpha)$ for each of these to determine
# the $\alpha$ that maximizes $f(\alpha)$, and from this immediately
# arrives at the correct form for the maximum horizontal stress $\mathbf
# n$.
#
# The description above computes a 3d \textit{direction} vector $\mathbf
# n$. If one were to scale this vector the same way as done in 2d, i.e.,
# with the magnitude of the stress in this direction, one will typically
# get vectors whose length is principally determined by the hydrostatic
# pressure at a given location simply because the hydrostatic pressure is
# the largest component of the overall stress. On the other hand, the
# hydrostatic pressure does not determine any principal direction because
# it is an isotropic, anti-compressive force. As a consequence, there are
# often points in simulations (e.g., at the center of convection rolls)
# where the stress has no dominant horizontal direction, and the algorithm
# above will then in essence choose a random direction because the stress
# is approximately equal in all horizontal directions.
If one scaled the # output by the magnitude of the stress in this direction (i.e., # approximately equal to the hydrostatic pressure at this point), one # would get randomly oriented vectors at these locations with significant # lengths. # # To avoid this problem, we scale the maximal horizontal compressive # stress direction $\mathbf n$ by the \textit{difference} between the # stress in the maximal and minimal horizontal stress directions. In other # words, let $\mathbf n_\perp=(\sin \alpha)\mathbf u - (\cos\alpha)\mathbf # v$ be the horizontal direction perpendicular to $\mathbf n$, then this # plugin outputs the vector quantity $\mathbf w = (\mathbf n^T \sigma_c # \mathbf n -\mathbf n^T_\perp \sigma_c \mathbf n_\perp) # \; \mathbf n$. In other words, the length of the vector produced # indicates \textit{how dominant} the direction of maximal horizontal # compressive strength is. # # Fig.~\ref{fig:max-horizontal-compressive-stress} shows a simple example # for this kind of visualization in 3d. # # \begin{figure} \includegraphics[width=0.3\textwidth] # {viz/plugins/maximum_horizontal_compressive_stress/temperature.png} # \hfill \includegraphics[width=0.3\textwidth] # {viz/plugins/maximum_horizontal_compressive_stress/velocity.png} \hfill # \includegraphics[width=0.3\textwidth] # {viz/plugins/maximum_horizontal_compressive_stress/horizontal-stress.png} # \caption{\it Illustration of the `maximum horizontal compressive # stress' visualization plugin. The left figure shows a ridge-like # temperature anomaly. Together with no-slip boundary along all six # boundaries, this results in two convection rolls (center). The # maximal horizontal compressive strength at the bottom center of # the domain is perpendicular to the ridge because the flow comes # together there from the left and right, yielding a compressive force # in left-right direction. At the top of the model, the flow separates # outward, leading to a \textit{negative} compressive stress in # left-right direction; because there is no flow in front-back # direction, the compressive strength in front-back direction is zero, # making the along-ridge direction the dominant one. At the center of # the convection rolls, both horizontal directions yield the same # stress; the plugin therefore chooses an essentially arbitrary # horizontal vector, but then uses a zero magnitude given that the # difference between the maximal and minimal horizontal stress is # zero at these points.} # \label{fig:max-horizontal-compressive-stress}\end{figure} # # `melt fraction': A visualization output object that generates output for # the melt fraction at the temperature and pressure of the current point. # If the material model computes a melt fraction, this is the quantity # that will be visualized. Otherwise, a specific parametrization for batch # melting (as described in the following) will be used. It does not take # into account latent heat. If there are no compositional fields, this # postprocessor will visualize the melt fraction of peridotite (calculated # using the anhydrous model of Katz, 2003). If there is at least one # compositional field, the postprocessor assumes that the first # compositional field is the content of pyroxenite, and will visualize the # melt fraction for a mixture of peridotite and pyroxenite (using the # melting model of Sobolev, 2011 for pyroxenite). 
All the parameters that were used in these calculations can be changed in
# the input file, perhaps the most relevant being the mass fraction of Cpx
# in peridotite in the Katz melting model (Mass fraction cpx), which right
# now has a default of 15\%. The corresponding p-T-diagrams can be
# generated by running the tests melt\_postprocessor\_peridotite and
# melt\_postprocessor\_pyroxenite.
#
# `melt material properties': A visualization output object that generates
# output for melt-related properties of the material model. Note that this
# postprocessor always outputs the compaction pressure, but can output a
# large range of additional properties, as selected in the ``List of
# properties'' parameter.
#
# `named additional outputs': Some material models can compute quantities
# other than those that typically appear in the equations that \aspect{}
# solves (such as the viscosity, density, etc). Examples of quantities
# material models may be able to compute are seismic velocities, or other
# quantities that can be derived from the state variables and the material
# coefficients such as the stress or stress anisotropies. These quantities
# are generically referred to as `named outputs' because they are given an
# explicit name different from the usual outputs of material models.
#
# This visualization postprocessor outputs whatever quantities the
# material model can compute. What quantities these are is specific to the
# material model in use for a simulation, and many models in fact do not
# provide any named outputs at all.
#
# `nonadiabatic pressure': A visualization output object that generates
# output for the non-adiabatic component of the pressure.
#
# The variable that is output this way is computed by taking the
# pressure at each point and subtracting from it the adiabatic pressure
# computed at the beginning of the simulation. Because the adiabatic
# pressure is one way of defining a static pressure background field, what
# this visualization postprocessor therefore produces is \textit{one} way
# to compute a \textit{dynamic pressure}. There are, however, other ways
# as well, depending on the choice of the ``background pressure''.
#
# `nonadiabatic temperature': A visualization output object that generates
# output for the non-adiabatic component of the temperature.
#
# `particle count': A visualization output object that generates output
# about the number of particles per cell.
#
# `partition': A visualization output object that generates output for the
# parallel partition that every cell of the mesh is associated with.
#
# `shear stress': A visualization output object that generates output for
# the 3 (in 2d) or 6 (in 3d) components of the shear stress tensor, i.e.,
# for the components of the tensor $2\eta\varepsilon(\mathbf u)$ in the
# incompressible case and $2\eta\left[\varepsilon(\mathbf u)-\tfrac
# 13(\textrm{tr}\;\varepsilon(\mathbf u))\mathbf I\right]$ in the
# compressible case. The shear stress differs from the full stress tensor
# by the absence of the pressure.
#
# `spd factor': A visualization output object that generates output for
# the spd factor. The spd factor is a factor which scales a part of the
# Jacobian used for the Newton solver to make sure that the Jacobian
# remains positive definite.
#
# `spherical velocity components': A visualization output object that
# outputs the polar coordinate components $v_r$ and $v_\phi$ of the
# velocity field in 2D and the spherical coordinate components $v_r$,
# $v_{\phi}$ and $v_{\theta}$ of the velocity field in 3D.
#
# `strain rate': A visualization output object that generates output for
# the norm of the strain rate, i.e., for the quantity
# $\sqrt{\varepsilon(\mathbf u):\varepsilon(\mathbf u)}$ in the
# incompressible case and $\sqrt{[\varepsilon(\mathbf u)-\tfrac
# 13(\textrm{tr}\;\varepsilon(\mathbf u))\mathbf I]:[\varepsilon(\mathbf
# u)-\tfrac 13(\textrm{tr}\;\varepsilon(\mathbf u))\mathbf I]}$ in the
# compressible case.
#
# `strain rate tensor': A visualization output object that generates
# output for the 4 (in 2d) or 9 (in 3d) components of the strain rate
# tensor, i.e., for the components of the tensor $\varepsilon(\mathbf u)$
# in the incompressible case and $\varepsilon(\mathbf u)-\tfrac
# 13(\textrm{tr}\;\varepsilon(\mathbf u))\mathbf I$ in the compressible
# case.
#
# `stress': A visualization output object that generates output for the 3
# (in 2d) or 6 (in 3d) components of the stress tensor, i.e., for the
# components of the tensor $2\eta\varepsilon(\mathbf u)+pI$ in the
# incompressible case and $2\eta\left[\varepsilon(\mathbf u)-\tfrac
# 13(\textrm{tr}\;\varepsilon(\mathbf u))\mathbf I\right]+pI$ in the
# compressible case.
#
# `temperature anomaly': A visualization output postprocessor that outputs
# the temperature minus the depth-average of the temperature. The average
# temperature is calculated using the lateral averaging function from the
# ``depth average'' postprocessor and interpolated linearly between the
# layers specified through ``Number of depth slices''.
#
# `vertical heat flux': A visualization output object that generates
# output for the heat flux in the vertical direction, which is the sum of
# the advective and the conductive heat flux, with the sign convention of
# positive flux upwards.
#
# `volume of fluid values': A visualization output object that outputs the
# volume fraction and optionally a level set field and the interface
# normal vectors of volume of fluid fields.
#
# `volumetric strain rate': A visualization output object that generates
# output for the volumetric strain rate, i.e., for the quantity
# $\nabla\cdot\mathbf u = \textrm{div}\; \mathbf u = \textrm{trace}\;
# \varepsilon(\mathbf u)$. This should be zero (in some average sense) in
# incompressible convection models, but can be non-zero in compressible
# models and models with melt transport.
set List of output variables = viscosity, density # default:

# VTU file output supports grouping files from several CPUs into a given
# number of files using MPI I/O when writing on a parallel filesystem.
# Select 0 for no grouping. This will disable parallel file output and
# instead write one file per processor. A value of 1 will generate one big
# file containing the whole solution, while a larger value will create
# that many files (at most as many as there are MPI ranks).
set Number of grouped files = 1 # default: 16

# The file format to be used for graphical output. The list of possible
# output formats that can be given here is documented in the appendix of
# the manual where the current parameter is described.
set Output format = vtu

# For computations with deforming meshes, ASPECT uses an
# Arbitrary-Lagrangian-Eulerian formulation to handle deforming the
# domain, so the mesh has its own velocity field.
This may be written as # an output field by setting this parameter to true. set Output mesh velocity = false # If set to true, quantities related to stress and strain are computed in # each vertex. Otherwise, an average per cell is computed. set Point-wise stress and strain = false # On large clusters it can be advantageous to first write the output to a # temporary file on a local file system and later move this file to a # network file system. If this variable is set to a non-empty string it # will be interpreted as a temporary storage location. set Temporary output location = # The time interval between each generation of graphical output files. A # value of zero indicates that output should be generated in each time # step. Units: years if the 'Use years in output instead of seconds' # parameter is set; seconds otherwise. set Time between graphical output = 1e8 # The maximum number of time steps between each generation of graphical # output files. set Time steps between graphical output = 25 # default: 2147483647 # deal.II offers the possibility to write vtu files with higher order # representations of the output data. This means each cell will correctly # show the higher order representation of the output data instead of the # linear interpolation between vertices that ParaView and Visit usually # show. Note that activating this option is safe and recommended, but # requires that (i) ``Output format'' is set to ``vtu'', (ii) # ``Interpolate output'' is set to true, (iii) you use a sufficiently new # version of Paraview or Visit to read the files (Paraview version 5.5 or # newer, and Visit version to be determined), and (iv) you use deal.II # version 9.1.0 or newer. # The effect of using this option can be seen in the following picture: # # \begin{center} # \includegraphics[width=0.5\textwidth]{viz/parameters/higher-order-output}\end{center}The # top figure shows the plain output without interpolation or higher order # output. The middle figure shows output that was interpolated as # discussed for the ``Interpolate output'' option. The bottom panel shows # higher order output that achieves better accuracy than the interpolated # output at a lower memory cost. set Write higher order output = false # File operations can potentially take a long time, blocking the progress # of the rest of the model run. Setting this variable to `true' moves this # process into a background thread, while the rest of the model # continues. set Write in background thread = false subsection Artificial viscosity composition # The name of the compositional field whose output should be # visualized. set Name of compositional field = end subsection Compositional fields as vectors # A list of sets of compositional fields which should be output as # vectors. Sets are separated from each other by semicolons and vector # components within each set are separated by commas (e.g. $vec1_x$, # $vec1_y$ ; $vec2_x$, $vec2_y$) where each name must be a defined named # compositional field. If only one name is given in a set, it is # interpreted as the first in a sequence of dim consecutive # compositional fields. set Names of fields = # Names of vectors as they will appear in the output. set Names of vectors = end subsection Heat flux map # A boolean flag that controls whether to output the heat flux map as a # point wise value, or as a cell-wise averaged value. 
The point wise # output is more accurate, but it currently omits prescribed heat flux # values at boundaries and advective heat flux that is caused by # velocities non-tangential to boundaries. If you do not use these two # features it is recommended to switch this setting on to benefit from # the increased output resolution. set Output point wise heat flux = false end subsection Material properties # A comma separated list of material properties that should be written # whenever writing graphical output. By default, the material properties # will always contain the density, thermal expansivity, specific heat # and viscosity. The following material properties are available: # # viscosity|density|thermal expansivity|specific heat|thermal # conductivity|thermal diffusivity|compressibility|entropy derivative # temperature|entropy derivative pressure|reaction terms|melt fraction set List of material properties = viscosity, density # default: density,thermal expansivity,specific heat,viscosity end subsection Melt fraction # Constant parameter in the quadratic function that approximates the # solidus of peridotite. Units: $\degree C$. set A1 = 1085.7 # Prefactor of the linear pressure term in the quadratic function that # approximates the solidus of peridotite. Units: $\degree C/Pa$. set A2 = 1.329e-7 # Prefactor of the quadratic pressure term in the quadratic function # that approximates the solidus of peridotite. Units: $\degree # C/(Pa^2)$. set A3 = -5.1e-18 # Constant parameter in the quadratic function that approximates the # lherzolite liquidus used for calculating the fraction of # peridotite-derived melt. Units: $\degree C$. set B1 = 1475.0 # Prefactor of the linear pressure term in the quadratic function that # approximates the lherzolite liquidus used for calculating the # fraction of peridotite-derived melt. Units: $\degree C/Pa$. set B2 = 8.0e-8 # Prefactor of the quadratic pressure term in the quadratic function # that approximates the lherzolite liquidus used for calculating the # fraction of peridotite-derived melt. Units: $\degree C/(Pa^2)$. set B3 = -3.2e-18 # Constant parameter in the quadratic function that approximates the # liquidus of peridotite. Units: $\degree C$. set C1 = 1780.0 # Prefactor of the linear pressure term in the quadratic function that # approximates the liquidus of peridotite. Units: $\degree C/Pa$. set C2 = 4.50e-8 # Prefactor of the quadratic pressure term in the quadratic function # that approximates the liquidus of peridotite. Units: $\degree # C/(Pa^2)$. set C3 = -2.0e-18 # Constant parameter in the quadratic function that approximates the # solidus of pyroxenite. Units: $\degree C$. set D1 = 976.0 # Prefactor of the linear pressure term in the quadratic function that # approximates the solidus of pyroxenite. Note that this factor is # different from the value given in Sobolev, 2011, because they use the # potential temperature whereas we use the absolute temperature. Units: # $\degree C/Pa$. set D2 = 1.329e-7 # Prefactor of the quadratic pressure term in the quadratic function # that approximates the solidus of pyroxenite. Units: $\degree # C/(Pa^2)$. set D3 = -5.1e-18 # Prefactor of the linear depletion term in the quadratic function that # approximates the melt fraction of pyroxenite. Units: $\degree C/Pa$. set E1 = 663.8 # Prefactor of the quadratic depletion term in the quadratic function # that approximates the melt fraction of pyroxenite. Units: $\degree # C/(Pa^2)$. 
set E2 = -611.4

# Mass fraction of clinopyroxene in the peridotite to be molten. Units:
# non-dimensional.
set Mass fraction cpx = 0.15

# Exponent of the melting temperature in the melt fraction calculation.
# Units: non-dimensional.
set beta = 1.5

# Constant in the linear function that approximates the clinopyroxene
# reaction coefficient. Units: non-dimensional.
set r1 = 0.5

# Prefactor of the linear pressure term in the linear function that
# approximates the clinopyroxene reaction coefficient. Units: $1/Pa$.
set r2 = 8e-11
end

subsection Melt material properties
# A comma separated list of melt properties that should be written
# whenever writing graphical output. The following material properties
# are available:
#
# compaction viscosity|fluid viscosity|permeability|fluid density|fluid
# density gradient|is melt cell|darcy coefficient|darcy coefficient no
# cutoff|compaction length
set List of properties = compaction viscosity,permeability
end

subsection Temperature anomaly
# Number of depth slices used to define average temperature.
set Number of depth slices = 20

# If true, use the specified boundary temperatures as average
# temperatures at the surface. If false, extrapolate the temperature
# gradient between the first and second cells to the surface. This
# option will only work for models with a fixed surface temperature.
set Use maximal temperature for bottom = true

# Whether to use the minimal specified boundary temperature as the
# bottom boundary temperature. This option will only work for models
# with a fixed bottom boundary temperature.
set Use minimal temperature for surface = true
end

subsection Volume of Fluid
# Include the internal data for the interface normal on the unit cells.
set Output interface normals = false

# Include fields defined such that the 0 contour is the fluid interface.
set Output interface reconstruction contour = false
end

subsection Vp anomaly
# Scheme to compute the average velocity-depth profile. The reference
# profile option evaluates the conditions along the reference adiabat
# according to the material model. The lateral average option instead
# calculates a lateral average from subdivision of the mesh. The lateral
# average option may produce spurious results where there are sharp
# velocity changes.
set Average velocity scheme = reference profile

# Number of depth slices used to define average seismic compressional
# wave velocities from which anomalies are calculated. Units:
# non-dimensional.
set Number of depth slices = 50
end

subsection Vs anomaly
# Scheme to compute the average velocity-depth profile. The reference
# profile option evaluates the conditions along the reference adiabat
# according to the material model. The lateral average option instead
# calculates a lateral average from subdivision of the mesh. The lateral
# average option may produce spurious results where there are sharp
# velocity changes.
set Average velocity scheme = reference profile

# Number of depth slices used to define average seismic shear wave
# velocities from which anomalies are calculated. Units:
# non-dimensional.
set Number of depth slices = 50
end
end
end

subsection Prescribed Stokes solution
# Select one of the following models:
#
# `ascii data': Implementation of a model in which the velocity is derived
# from files containing data in ascii format.
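# As a purely hypothetical illustration (all coordinates and velocity
# values below are placeholders, not data shipped with the code), a 2d
# file on a $3\times 3$ grid in the format described next could look like:
#
#     # POINTS: 3 3
#     # x    y    v_x    v_y
#     0.0   0.0   1e-9   0.0
#     0.5   0.0   1e-9   0.0
#     1.0   0.0   1e-9   0.0
#     0.0   0.5   1e-9   0.0
#     0.5   0.5   1e-9   0.0
#     1.0   0.5   1e-9   0.0
#     0.0   1.0   1e-9   0.0
#     0.5   1.0   1e-9   0.0
#     1.0   1.0   1e-9   0.0
#
# Note in particular that the first coordinate (`x') ascends fastest, as
# required by the ordering rule described below.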
Note the required format of # the input data: The first lines may contain any number of comments if they # begin with `#', but one of these lines needs to contain the number of grid # points in each dimension as for example `# POINTS: 3 3'. The order of the # data columns has to be `x', `y', `v${}_x$' , `v${}_y$' in a 2d model and # `x', `y', `z', `v${}_x$' , `v${}_y$' , `v${}_z$' in a 3d model. Note that # the data in the input files need to be sorted in a specific order: the # first coordinate needs to ascend first, followed by the second and the # third at last in order to assign the correct data to the prescribed # coordinates. If you use a spherical model, then the data will still be # handled as Cartesian, however the assumed grid changes. `x' will be # replaced by the radial distance of the point to the bottom of the model, # `y' by the azimuth angle and `z' by the polar angle measured positive from # the north pole. The grid will be assumed to be a latitude-longitude grid. # Note that the order of spherical coordinates is `r', `phi', `theta' and # not `r', `theta', `phi', since this allows for dimension independent # expressions. # # `circle': This value describes a vector field that rotates around the # z-axis with constant angular velocity (i.e., with a velocity that # increases with distance from the axis). The pressure is set to zero. # # `function': This plugin allows to prescribe the Stokes solution for the # velocity and pressure field in terms of an explicit formula. The format of # these functions follows the syntax understood by the muparser library, see # Section~\ref{sec:muparser-format}. set Model name = unspecified subsection Ascii data model # The name of a directory that contains the model data. This path may # either be absolute (if starting with a `/') or relative to the current # directory. The path may also include the special text # `$ASPECT_SOURCE_DIR' which will be interpreted as the path in which the # ASPECT source files were located when ASPECT was compiled. This # interpretation allows, for example, to reference files located in the # `data/' subdirectory of ASPECT. set Data directory = $ASPECT_SOURCE_DIR/data/prescribed-stokes-solution/ # The file name of the model data. Provide file in format: (Velocity file # name).\%s\%d where \%s is a string specifying the boundary of the model # according to the names of the boundary indicators (of the chosen # geometry model).\%d is any sprintf integer qualifier, specifying the # format of the current file number. set Data file name = box_2d.txt # Scalar factor, which is applied to the model data. You might want to use # this to scale the input to a reference model. Another way to use this # factor is to convert units of the input files. For instance, if you # provide velocities in cm/yr set this factor to 0.01. set Scale factor = 1. end subsection Compaction pressure function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) 
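# For instance (an illustrative value only, not one used in this file),
# one might write
#
#     set Function constants = pi=3.1415926536, year_in_seconds=31557600
#
# and then refer to `pi' and `year_in_seconds' inside the function
# expression.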
set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection Fluid pressure function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. 
However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection Fluid velocity function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0; 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection Pressure function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. 
In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. set Variable names = x,y,t end subsection Velocity function # Sometimes it is convenient to use symbolic constants in the expression # that describes the function, rather than having to use its numeric value # everywhere the constant appears. These values can be defined using this # parameter, in the form `var1=value1, var2=value2, ...'. # # A typical example would be to set this runtime parameter to # `pi=3.1415926536' and then use `pi' in the expression of the actual # formula. (That said, for convenience this class actually defines both # `pi' and `Pi' by default, but you get the idea.) set Function constants = # The formula that denotes the function you want to evaluate for # particular values of the independent variables. This expression may # contain any of the usual operations such as addition or multiplication, # as well as all of the common functions such as `sin' or `cos'. In # addition, it may contain expressions like `if(x>0, 1, -1)' where the # expression evaluates to the second argument if the first argument is # true, and to the third argument otherwise. For a full overview of # possible expressions accepted see the documentation of the muparser # library at http://muparser.beltoforion.de/. # # If the function you are describing represents a vector-valued function # with multiple components, then separate the expressions for individual # components by a semicolon. set Function expression = 0; 0 # The names of the variables as they will be used in the function, # separated by commas. By default, the names of variables at which the # function will be evaluated are `x' (in 1d), `x,y' (in 2d) or `x,y,z' (in # 3d) for spatial coordinates and `t' for time. You can then use these # variable names in your function expression and they will be replaced by # the values of these variables at which the function is currently # evaluated. However, you can also choose a different set of names for the # independent variables at which to evaluate your function expression. For # example, if you work in spherical coordinates, you may wish to set this # input parameter to `r,phi,theta,t' and then use these variable names in # your function expression. 
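# As a purely illustrative sketch (the expression is hypothetical, not the
# one used in this file): with the default variable names `x,y,t', a
# rigid-body rotation about the origin could be prescribed in this
# subsection via
#
#     set Function expression = -y; x
#
# i.e., the two semicolon-separated components are $v_x=-y$ and $v_y=x$.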
set Variable names = x,y,t end end subsection Solver parameters # The relative tolerance up to which the linear system for the composition # system gets solved. See `Stokes solver parameters/Linear solver tolerance' # for more details. set Composition solver tolerance = 1e-12 # The relative tolerance up to which the linear system for the temperature # system gets solved. See `Stokes solver parameters/Linear solver tolerance' # for more details. set Temperature solver tolerance = 1e-12 subsection AMG parameters # This threshold tells the AMG setup how the coarsening should be # performed. In the AMG used by ML, all points that strongly couple with # the tentative coarse-level point form one aggregate. The term strong # coupling is controlled by the variable aggregation\_threshold, meaning # that all elements that are not smaller than aggregation\_threshold times # the diagonal element do couple strongly. The default is strongly # recommended. There are indications that for the Newton solver a # different value might be better. For extensive benchmarking of various # settings of the AMG parameters in this section for the Stokes problem # and others, see https://github.com/geodynamics/aspect/pull/234. set AMG aggregation threshold = 0.001 # Turns on extra information on the AMG solver. Note that this will # generate much more output. set AMG output details = false # Determines how many sweeps of the smoother should be performed. When the # flag elliptic is set to true, (which is true for ASPECT), the polynomial # degree of the Chebyshev smoother is set to this value. The term sweeps # refers to the number of matrix-vector products performed in the # Chebyshev case. In the non-elliptic case, this parameter sets the number # of SSOR relaxation sweeps for post-smoothing to be performed. The # default is strongly recommended. There are indications that for the # Newton solver a different value might be better. For extensive # benchmarking of various settings of the AMG parameters in this section # for the Stokes problem and others, see # https://github.com/geodynamics/aspect/pull/234. set AMG smoother sweeps = 2 # This parameter sets the type of smoother for the AMG. The default is # strongly recommended for any normal runs with ASPECT. There are some # indications that the symmetric Gauss-Seidel might be better and more # stable for the Newton solver. For extensive benchmarking of various # settings of the AMG parameters in this section for the Stokes problem # and others, see https://github.com/geodynamics/aspect/pull/234. set AMG smoother type = Chebyshev end subsection Advection solver parameters # This is the number of iterations that define the GMRES solver restart # length. Increasing this parameter makes the solver more robust and # decreases the number of iterations. Be aware that increasing this number # increases the memory usage of the advection solver, and makes individual # iterations more expensive. set GMRES solver restart length = 50 end subsection Diffusion solver parameters # Set a length scale for the diffusion of compositional fields if the # ``prescribed field with diffusion'' method is selected for a field. More # precisely, this length scale represents the square root of the product # of diffusivity and time in the diffusion equation, and controls the # distance over which features are diffused. Units: $\si{m}$. set Diffusion length scale = 1.e4 end subsection Newton solver parameters # The maximum number of line search iterations allowed. 
If the criterion is not reached after this number of iterations, we apply
# the scaled increment even though it does not satisfy the necessary
# criteria and simply continue with the next Newton iteration.
set Max Newton line search iterations = 5

# If the 'Nonlinear Newton solver switch tolerance' is reached before the
# maximal number of Picard iterations, then the solver switches to Newton
# solves anyway.
set Max pre-Newton nonlinear iterations = 10

# The linear Stokes solver tolerance is dynamically chosen for the Newton
# solver, based on the Eisenstat Walker 1994 paper
# (https://doi.org/10.1137/0917003), equation 2.2. Because this value can
# become larger than one, we limit this value by this parameter.
set Maximum linear Stokes solver tolerance = 0.9

# A relative tolerance with respect to the residual of the first
# iteration, up to which the nonlinear Picard solver will iterate, before
# changing to the Newton solver.
set Nonlinear Newton solver switch tolerance = 1e-5

# When stabilizing the Newton matrix, we can encounter situations where
# the coefficient inside the elliptic (top-left) block becomes negative or
# zero. This coefficient has the form $1+x$ where $x$ can sometimes be
# smaller than $-1$. In this case, the top-left block of the matrix is no
# longer positive definite, and both preconditioners and iterative solvers
# may fail. To prevent this, the stabilization computes an $\alpha$ so
# that $1+\alpha x$ is never negative. This $\alpha$ is chosen as $1$ if
# $x\ge -1$, and $\alpha=-\frac 1x$ otherwise. (Note that this always
# leads to $0\le \alpha \le 1$.) On the other hand, we also want to stay
# away from $1+\alpha x=0$, and so modify the choice of $\alpha$ to be $1$
# if $x\ge -c$, and $\alpha=-\frac cx$ with a $c$ between zero and one.
# This way, if $c<1$, we are assured that $1+\alpha x\ge 1-c$, i.e.,
# bounded away from zero.
set SPD safety factor = 0.9

# This parameter allows for the stabilization of the preconditioner. If
# one derives the Newton method without any modifications, the matrix
# created for the preconditioning is not necessarily Symmetric Positive
# Definite. This is problematic (see \cite{FBTGS18}). When `none' is
# chosen, the preconditioner is not stabilized. The `symmetric' option
# symmetrizes the matrix, and `PD' makes the matrix Positive Definite.
# `SPD' is the full stabilization, where the matrix is guaranteed
# Symmetric Positive Definite.
set Stabilization preconditioner = SPD

# This parameter allows for the stabilization of the velocity block. If
# one derives the Newton method without any modifications, the matrix
# created for the velocity block is not necessarily Symmetric Positive
# Definite. This is problematic (see \cite{FBTGS18}). When `none' is
# chosen, the velocity block is not stabilized. The `symmetric' option
# symmetrizes the matrix, and `PD' makes the matrix Positive Definite.
# `SPD' is the full stabilization, where the matrix is guaranteed
# Symmetric Positive Definite.
set Stabilization velocity block = SPD

# If set to true, the Picard iteration uses the Eisenstat Walker method to
# determine how accurately linear systems need to be solved. The Picard
# iteration is used, for example, in the first few iterations of the
# Newton method before the matrix is built including derivatives of the
# model, since the Picard iteration generally converges even from points
# where Newton's method does not.
#
# Once derivatives are used in a Newton method, \aspect{} always uses the
# Eisenstat Walker method.
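# To make the Picard-to-Newton handover described in this subsection
# concrete, a hypothetical combination of settings (the values below are
# purely illustrative, not recommendations) might read:
#
#     set Max pre-Newton nonlinear iterations      = 5
#     set Nonlinear Newton solver switch tolerance = 1e-4
#     set Max Newton line search iterations        = 5
#     set Stabilization preconditioner             = SPD
#     set Stabilization velocity block             = SPD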
set Use Eisenstat Walker method for Picard iterations = false # When this parameter is true and the linear solver fails, we try again, # but now with SPD stabilization for both the preconditioner and the # velocity block. The SPD stabilization will remain active until the next # timestep, when the default values are restored. set Use Newton failsafe = false # This method allows to slowly introduce the derivatives based on the # improvement of the residual. If set to false, the scaling factor for the # Newton derivatives is set to one immediately when switching on the # Newton solver. When this is set to true, the derivatives are slowly # introduced by the following equation: $\max(0.0, # (1.0-(residual/switch\_initial\_residual)))$, where # switch\_initial\_residual is the residual at the time when the Newton # solver is switched on. set Use Newton residual scaling method = false end subsection Operator splitting parameters # Set a time step size for computing reactions of compositional fields and # the temperature field in case operator splitting is used. This is only # used when the nonlinear solver scheme ``operator splitting'' is # selected. The reaction time step must be greater than 0. If you want to # prescribe the reaction time step only as a relative value compared to # the advection time step as opposed to as an absolute value, you should # use the parameter ``Reaction time steps per advection step'' and set # this parameter to the same (or larger) value as the ``Maximum time # step'' (which is 5.69e+300 by default). Units: Years or seconds, # depending on the ``Use years in output instead of seconds'' parameter. set Reaction time step = 1000.0 # The number of reaction time steps done within one advection time step in # case operator splitting is used. This is only used if the nonlinear # solver scheme ``operator splitting'' is selected. If set to zero, this # parameter is ignored. Otherwise, the reaction time step size is chosen # according to this criterion and the ``Reaction time step'', whichever # yields the smaller time step. Units: none. set Reaction time steps per advection step = 0 end subsection Stokes solver parameters # This is the number of iterations that define the GMRES solver restart # length. Increasing this parameter helps with convergence issues arising # from high localized viscosity jumps in the domain. Be aware that # increasing this number increases the memory usage of the Stokes solver, # and makes individual Stokes iterations more expensive. set GMRES solver restart length = 50 # This is the sole parameter for the IDR(s) Krylov solver and will dictate # the number of matrix-vector products and preconditioner applications per # iteration (s+1) and the total number of temporary vectors required # (5+3*s). For s=1, this method is analogous to BiCGStab. As s is # increased this method is expected to converge to GMRES in terms of # matrix-vector/preconditioner applications to solution. set IDR(s) parameter = 2 # This is the Krylov method used to solve the Stokes system. Both options, # GMRES and IDR(s), solve non-symmetric, indefinite systems. GMRES # guarantees the residual will be reduced in each iteration while IDR(s) # has no such property. On the other hand, the vector storage requirement # for GMRES is dependent on the restart length and can be quite # restrictive (since, for the matrix-free GMG solver, memory is dominated # by these vectors) whereas IDR(s) has a short term recurrence. 
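# As an illustration only (the value strings `block GMG' and `IDR(s)' are
# assumptions here and may not match the exact names accepted by the
# code), one might combine the matrix-free GMG Stokes solver with IDR(s)
# for the cheap iterations roughly as follows:
#
#     set Stokes solver type                   = block GMG
#     set Krylov method for cheap solver steps = IDR(s)
#     set IDR(s) parameter                     = 2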
Note that # the IDR(s) Krylov method is not available for the AMG solver since it is # not a flexible method, i.e., it cannot handle a preconditioner which may # change in each iteration (the AMG-based preconditioner contains a CG # solve in the pressure space which may have different number of # iterations each step). set Krylov method for cheap solver steps = GMRES # A relative tolerance up to which the approximate inverse of the $A$ # block of the Stokes system is computed. This approximate $A$ is used in # the preconditioning used in the GMRES solver. The exact definition of # this block preconditioner for the Stokes equation can be found in # \cite{KHB12}. set Linear solver A block tolerance = 1e-2 # A relative tolerance up to which the approximate inverse of the $S$ # block (i.e., the Schur complement matrix $S = BA^{-1}B^{T}$) of the # Stokes system is computed. This approximate inverse of the $S$ block is # used in the preconditioning used in the GMRES solver. The exact # definition of this block preconditioner for the Stokes equation can be # found in \cite{KHB12}. set Linear solver S block tolerance = 1e-6 # A relative tolerance up to which the linear Stokes systems in each time # or nonlinear step should be solved. The absolute tolerance will then be # $\| M x_0 - F \| \cdot \text{tol}$, where $x_0 = (0,p_0)$ is the initial # guess of the pressure, $M$ is the system matrix, $F$ is the right-hand # side, and tol is the parameter specified here. We include the initial # guess of the pressure to remove the dependency of the tolerance on the # static pressure. A given tolerance value of 1 would mean that a zero # solution vector is an acceptable solution since in that case the norm of # the residual of the linear system equals the norm of the right hand # side. A given tolerance of 0 would mean that the linear system has to be # solved exactly, since this is the only way to obtain a zero residual. # # In practice, you should choose the value of this parameter to be so that # if you make it smaller the results of your simulation do not change any # more (qualitatively) whereas if you make it larger, they do. For most # cases, the default value should be sufficient. In fact, a tolerance of # 1e-4 might be accurate enough. set Linear solver tolerance = 1e-3 # default: 1e-7 # This sets the maximum number of iterations used in the expensive Stokes # solver. If this value is set too low for the size of the problem, the # Stokes solver will not converge and return an error message pointing out # that the user didn't allow a sufficiently large number of iterations for # the iterative solver to converge. set Maximum number of expensive Stokes solver steps = 50000 # default: 1000 # As explained in the paper that describes ASPECT (Kronbichler, Heister, # and Bangerth, 2012, see \cite{KHB12}) we first try to solve the Stokes # system in every time step using a GMRES iteration with a poor but cheap # preconditioner. By default, we try whether we can converge the GMRES # solver in 200 such iterations before deciding that we need a better # preconditioner. This is sufficient for simple problems with variable # viscosity and we never need the second phase with the more expensive # preconditioner. On the other hand, for more complex problems, and in # particular for problems with strongly nonlinear viscosity, the 200 cheap # iterations don't actually do very much good and one might skip this part # right away. 

# As explained in the paper that describes ASPECT (Kronbichler, Heister,
# and Bangerth, 2012, see \cite{KHB12}), we first try to solve the Stokes
# system in every time step using a GMRES iteration with a poor but cheap
# preconditioner. By default, we try whether we can converge the GMRES
# solver in 200 such iterations before deciding that we need a better
# preconditioner. This is sufficient for simple problems with variable
# viscosity, and in such cases we never need the second phase with the
# more expensive preconditioner. On the other hand, for more complex
# problems, and in particular for problems with strongly nonlinear
# viscosity, the 200 cheap iterations do not actually do very much good
# and one might as well skip this part right away. In that case, this
# parameter can be set to zero, i.e., we immediately start with the better
# but more expensive preconditioner.
set Number of cheap Stokes solver steps = 200

# This is the type of solver used on the Stokes system. The block
# geometric multigrid (GMG) solver currently has a limited implementation
# and may therefore trigger Asserts in the code when used. If this is the
# case, please switch to 'block AMG'. Additionally, the block GMG solver
# requires using material model averaging.
set Stokes solver type = block AMG

# If set to true, the linear system for the Stokes equation will be solved
# using Trilinos KLU; otherwise an iterative Schur complement solver is
# used. The direct solver is only efficient for small problems.
set Use direct solver for Stokes system = false

# This parameter determines whether we use a simplified approximation of
# the $A$ block as preconditioner for the Stokes solver, or the full $A$
# block. The simplified approximation only contains the terms that
# describe the coupling of identical components (plus boundary
# conditions), as described in \cite{KHB12}. The full block is closer to
# the description in \cite{rudi2017weighted}.
#
# There is no clear way to determine which preconditioner performs better.
# The default value (simplified approximation) requires more outer GMRES
# iterations, but is faster to apply in each iteration. The full block
# needs less assembly time (because the block is available anyway) and
# converges in fewer GMRES iterations, but requires more time per
# iteration. There are also differences in the amount of memory consumed
# by the two approaches.
#
# The default value should be good for relatively simple models, but in
# particular for very strong viscosity contrasts the full $A$ block can be
# advantageous.
set Use full A block as preconditioner = false
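
# The following commented-out snippet is only a sketch of how the remarks
# above might be combined for a model with very strong viscosity
# contrasts: skip the cheap solver phase entirely, use the full $A$ block
# as preconditioner, and allow a longer GMRES restart length (the value
# 100 is an arbitrary illustrative choice, not a recommendation):
#
#   set Number of cheap Stokes solver steps = 0
#   set Use full A block as preconditioner  = true
#   set GMRES solver restart length         = 100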
end
end

subsection Temperature field
# A comma separated list denoting the solution method of the temperature
# field. Each entry of the list must be one of the currently implemented
# field types.
#
# These choices correspond to the following methods by which the
# temperature field gains its values:
# \begin{itemize}
# \item ``field'': If the temperature is marked with this method, then its
#   values are computed in each time step by solving the temperature
#   advection-diffusion equation. In other words, this corresponds to the
#   usual notion of a temperature.
# \item ``prescribed field'': The value of the temperature is determined
#   in each time step from the material model. If the temperature field is
#   marked with this method, then the value of a specific additional
#   material model output, called the `PrescribedTemperatureOutputs', is
#   interpolated onto the temperature field. This field does not change
#   otherwise; it is not advected with the flow.
# \end{itemize}
set Temperature method = field
end

subsection Termination criteria
# Whether to checkpoint the simulation right before termination.
set Checkpoint on termination = false

# Terminate the simulation once the specified timestep has been reached.
set End step = 100

# A comma separated list of termination criteria that will determine when
# the simulation should end. Whether explicitly stated or not, the ``end
# time'' termination criterion will always be used. The following
# termination criteria are available:
#
# `end step': Terminate the simulation once the specified timestep has
# been reached.
#
# `end time': Terminate the simulation once the end time specified in the
# input file has been reached. Unlike all other termination criteria, this
# criterion is \textit{always} active, whether or not it has been
# explicitly selected in the input file (this is done to preserve the
# historical behavior of \aspect{}, but it also likely does not
# inconvenience anyone since it is what would be selected in most cases
# anyway).
#
# `steady state temperature': A criterion that terminates the simulation
# when the global integral of the temperature field stays within a certain
# range for a specified period of time.
#
# `steady state velocity': A criterion that terminates the simulation when
# the RMS of the velocity field stays within a certain range for a
# specified period of time.
#
# `user request': Terminate the simulation gracefully when a file with a
# specified name appears in the output directory. This allows the user to
# gracefully exit the simulation at any time by simply creating such a
# file using, for example, \texttt{touch output/terminate}. The file's
# location is chosen to be in the output directory, rather than in a
# generic location such as the ASPECT directory, so that one can run
# multiple simulations at the same time (which presumably write to
# different output directories) and can selectively terminate a particular
# one.
#
# `wall time': Terminate the simulation once the wall time limit has been
# reached.
set Termination criteria = end time

# The wall time of the simulation. Units: hours.
set Wall time = 24.

subsection Steady state temperature
# The maximum relative deviation of the temperature in recent simulation
# time for the system to be considered in steady state. If the actual
# deviation is smaller than this number, then the simulation will be
# terminated.
set Maximum relative deviation = 0.05

# The minimum length of simulation time that the system should be in
# steady state before termination. Units: years if the 'Use years in
# output instead of seconds' parameter is set; seconds otherwise.
set Time in steady state = 1e7
end

subsection Steady state velocity
# The maximum relative deviation of the RMS velocity in recent simulation
# time for the system to be considered in steady state. If the actual
# deviation is smaller than this number, then the simulation will be
# terminated.
set Maximum relative deviation = 0.05

# The minimum length of simulation time that the system should be in
# steady state before termination. Units: years if the 'Use years in
# output instead of seconds' parameter is set; seconds otherwise.
set Time in steady state = 1e7
end

subsection User request
# The name of a file that, if it exists in the output directory (whose
# name is also specified in the input file), will lead to termination of
# the simulation. The file's location is chosen to be in the output
# directory, rather than in a generic location such as the ASPECT
# directory, so that one can run multiple simulations at the same time
# (which presumably write to different output directories) and can
# selectively terminate a particular one.
set File name = terminate-aspect
end
end
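
# An illustrative sketch of combining the criteria described above (the
# values are arbitrary): a run on a shared cluster could be told to stop
# after 500 time steps, after 48 hours of wall time, or whenever a file
# with the name given by ``User request/File name'' (by default
# `terminate-aspect') appears in the output directory, in addition to the
# always-active ``end time'' criterion:
#
#   subsection Termination criteria
#     set Termination criteria = end step, wall time, user request
#     set End step             = 500
#     set Wall time            = 48
#   end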

subsection Volume of Fluid
# When set to true, Volume of Fluid interface tracking will be used.
set Enable interface tracking = false

# Number of divisions per dimension when computing the initial volume
# fractions. If set to the default of 3 for a 2D model, then the
# initialization will be based on the initialization criterion at $3^2=9$
# points within each cell. If the initialization is based on a
# composition-style initial condition, a larger value may be desired for a
# better approximation of the initial fluid fractions. Smaller values will
# suffice in the case of level set initializations, due to the presence of
# more information to better approximate the initial fluid fractions.
set Number initialization samples = 3

# Minimum significant volume. Fluid fractions below this value are
# considered to be zero.
set Volume fraction threshold = 1e-6

# The relative tolerance up to which the linear system for the Volume of
# Fluid system is solved. See 'Solver parameters/Composition solver
# tolerance' for more details.
set Volume of Fluid solver tolerance = 1e-12
end
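
# A minimal sketch of enabling the interface tracking described above,
# with a finer initialization sampling as suggested for composition-style
# initial conditions (both values are chosen only for illustration):
#
#   subsection Volume of Fluid
#     set Enable interface tracking     = true
#     set Number initialization samples = 5
#   end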