diff --git a/armi/bookkeeping/report/html.py b/armi/bookkeeping/report/html.py
index 696f38b142..15fd7e8c10 100644
--- a/armi/bookkeeping/report/html.py
+++ b/armi/bookkeeping/report/html.py
@@ -180,12 +180,14 @@ def encode64(file_path):
from armi import runLog
runLog.warning(
- "'.pdf' images cannot be embedded into this HTML report. {} will not be inserted.".format(
- file_path
+ (
+ f"'.pdf' images cannot be embedded into this HTML report. {file_path} will not be"
+ " inserted."
)
)
- return "Faulty PDF image inclusion: {} attempted to be inserted but no support is currently offered for such.".format(
- file_path
+ return (
+ f"Faulty PDF image inclusion: {file_path} attempted to be inserted but no support is "
+ "currently offered for such."
)
with open(file_path, "rb") as img_src:
return r"data:image/{};base64,{}".format(
diff --git a/armi/bookkeeping/report/newReports.py b/armi/bookkeeping/report/newReports.py
index 5043f6ffc7..ab6f214f1c 100644
--- a/armi/bookkeeping/report/newReports.py
+++ b/armi/bookkeeping/report/newReports.py
@@ -195,12 +195,13 @@ def render(self, level, idPrefix):
Parameters
----------
level : int
- level of the nesting for this section, determines the size of the heading title for the Section
- (The higher the level, the smaller the title font-size). Ranges from H1 - H4 in html terms.
+ level of the nesting for this section, determines the size of the heading title for the
+ Section (The higher the level, the smaller the title font-size). Ranges from H1 - H4 in
+ html terms.
idPrefix : String
- used for href/id referencing for the left hand side table of contents to be paired with the item
- that render() is called upon.
+ Used for href/id referencing for the left hand side table of contents to be paired with
+ the item that render() is called upon.
Returns
-------
@@ -440,10 +441,10 @@ class TimeSeries(ReportNode):
Example
-------
- >>> series = TimeSeries("Plot of K-effective", "plot", ["k-effective"], "k-eff", "keff.png") # Adding to a plot with k-effective
+ >>> series = TimeSeries("Plot of K-effective", "plot", ["k-effective"], "k-eff", "keff.png")
>>> time = r.p.time # The current time node of the reactor.
>>> data = r.core.p.keff # The parameter k-effective value at that time.
- >>> uncertainty = r.core.p.keffUnc # Since the parameter yields keff-uncontrolled value at the current time.
+    >>> uncertainty = r.core.p.keffUnc # The keff-uncontrolled value at the current time.
>>> series.add("k-effective", time, data, uncertainty) # Adds this point to be plotted later.
>>> # Adding to a plot with multiple lines for fuel Burn-Up Plot.
diff --git a/armi/interfaces.py b/armi/interfaces.py
index 2855274b4c..6548380d69 100644
--- a/armi/interfaces.py
+++ b/armi/interfaces.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-r"""
+"""
Interfaces are objects of code that interact with ARMI. They read information off the state,
perform calculations (or run external codes), and then store the results back in the state.
@@ -42,23 +42,23 @@ class STACK_ORDER: # noqa: invalid-class-name
"""
Constants that help determine the order of modules in the interface stack.
- Each module defines an ``ORDER`` constant that specifies where in this order it
- should be placed in the Interface Stack.
+ Each module defines an ``ORDER`` constant that specifies where in this order it should be placed
+ in the Interface Stack.
.. impl:: Define an ordered list of interfaces.
:id: I_ARMI_OPERATOR_INTERFACES0
:implements: R_ARMI_OPERATOR_INTERFACES
- At each time node during a simulation, an ordered colletion of Interfaces
- are run (referred to as the interface stack). But ARMI does not force the order upon the analyst.
- Instead, each Interface registers where in that ordered list it belongs by
- giving itself an order number (which can be an integer or a decimal).
- This class defines a set of constants which can be imported and used
- by Interface developers to define that Interface's position in the stack.
+            At each time node during a simulation, an ordered collection of Interfaces is run (referred
+ to as the interface stack). But ARMI does not force the order upon the analyst. Instead,
+ each Interface registers where in that ordered list it belongs by giving itself an order
+ number (which can be an integer or a decimal). This class defines a set of constants which
+ can be imported and used by Interface developers to define that Interface's position in the
+ stack.
- The constants defined are given names, based on common stack orderings
- in the ARMI ecosystem. But in the end, these are just constant values,
- and the names they are given are merely suggestions.
+ The constants defined are given names, based on common stack orderings in the ARMI
+ ecosystem. But in the end, these are just constant values, and the names they are given are
+ merely suggestions.
See Also
--------
@@ -158,12 +158,14 @@ def storePreviousIterationValue(self, val: _SUPPORTED_TYPES):
def isConverged(self, val: _SUPPORTED_TYPES) -> bool:
"""
- Return boolean indicating if the convergence criteria between the current and previous iteration values are met.
+ Return boolean indicating if the convergence criteria between the current and previous
+ iteration values are met.
Parameters
----------
val : _SUPPORTED_TYPES
- the most recent value for computing convergence critera. Is commonly equal to interface.getTightCouplingValue()
+            The most recent value for computing convergence criteria. Is commonly equal to
+            interface.getTightCouplingValue().
Returns
-------
@@ -172,18 +174,19 @@ def isConverged(self, val: _SUPPORTED_TYPES) -> bool:
Notes
-----
- - On convergence, this class is automatically reset to its initial condition to avoid retaining
- or holding a stale state. Calling this method will increment a counter that when exceeded will
- clear the state. A warning will be reported if the state is cleared prior to the convergence
- criteria being met.
- - For computing convergence of arrays, only up to 2D is allowed. 3D arrays would arise from considering
- component level parameters. However, converging on component level parameters is not supported at this time.
+ - On convergence, this class is automatically reset to its initial condition to avoid
+ retaining or holding a stale state. Calling this method will increment a counter that when
+ exceeded will clear the state. A warning will be reported if the state is cleared prior to
+ the convergence criteria being met.
+ - For computing convergence of arrays, only up to 2D is allowed. 3D arrays would arise from
+ considering component level parameters. However, converging on component level parameters
+ is not supported at this time.
Raises
------
ValueError
- If the previous iteration value has not been assigned. The ``storePreviousIterationValue`` method
- must be called first.
+ If the previous iteration value has not been assigned. The
+ ``storePreviousIterationValue`` method must be called first.
RuntimeError
Only support calculating norms for up to 2D arrays.
"""
@@ -212,8 +215,8 @@ def isConverged(self, val: _SUPPORTED_TYPES) -> bool:
"Currently only support up to 2D arrays for calculating convergence of arrays."
)
- # Check if convergence is satisfied. If so, or if reached max number of iters, then
- # reset the number of iterations
+ # Check if convergence is satisfied. If so, or if reached max number of iters, then reset
+ # the number of iterations
converged = self.eps < self.tolerance
if converged:
self._numIters = 0
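
The convergence pattern described in this docstring (compare successive iterates, support at most
2D arrays, reset the iteration counter once eps < tolerance) can be sketched generically. This is an
illustration only, not ARMI's actual tight-coupling code, and the default tolerance is assumed:

    import numpy as np

    def isConvergedSketch(previous, current, tolerance=1e-5):
        """Sketch: convergence test between two successive iterates (scalars, 1D, or 2D arrays)."""
        diff = np.atleast_1d(np.asarray(current, dtype=float) - np.asarray(previous, dtype=float))
        if diff.ndim > 2:
            raise RuntimeError("Only up to 2D arrays are supported for convergence norms.")
        eps = np.linalg.norm(diff)
        return eps < tolerance
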
@@ -285,16 +288,15 @@ def getInputFiles(cls, cs):
name: Union[str, None] = None
"""
- The name of the interface. This is undefined for the base class, and must be
- overridden by any concrete class that extends this one.
+ The name of the interface. This is undefined for the base class, and must be overridden by any
+ concrete class that extends this one.
"""
# TODO: This is a terrible name.
function = None
"""
- The function performed by an Interface. This is not required be be defined
- by implementations of Interface, but is used to form categories of
- interfaces.
+    The function performed by an Interface. This is not required to be defined by implementations of
+ Interface, but is used to form categories of interfaces.
"""
class Distribute:
@@ -308,8 +310,8 @@ def __init__(self, r, cs):
"""
Construct an interface.
- The ``r`` and ``cs`` arguments are required, but may be ``None``, where
- appropriate for the specific ``Interface`` implementation.
+ The ``r`` and ``cs`` arguments are required, but may be ``None``, where appropriate for the
+ specific ``Interface`` implementation.
Parameters
----------
@@ -352,8 +354,8 @@ def distributable(self):
Notes
-----
- Cases where this isn't possible include the database interface,
- where the SQL driver cannot be distributed.
+ Cases where this isn't possible include the database interface, where the SQL driver cannot
+ be distributed.
"""
return self.Distribute.DUPLICATE
@@ -391,17 +393,20 @@ def attachReactor(self, o, r):
self.o = o
def detachReactor(self):
- """Delete the callbacks to reactor or operator. Useful when pickling, MPI sending, etc. to save memory."""
+ """Delete the callbacks to reactor or operator. Useful when pickling, MPI sending, etc. to
+ save memory.
+ """
self.o = None
self.r = None
self.cs = None
def duplicate(self):
"""
- Duplicate this interface without duplicating some of the large attributes (like the entire reactor).
+ Duplicate this interface without duplicating some of the large attributes (like the entire
+ reactor).
- Makes a copy of interface with detached reactor/operator/settings so that it can be attached to an operator
- at a later point in time.
+ Makes a copy of interface with detached reactor/operator/settings so that it can be attached
+ to an operator at a later point in time.
Returns
-------
@@ -461,9 +466,9 @@ def _initializeParams(self):
Notes
-----
- Parameters with defaults are not written to the database until they have been assigned SINCE_ANYTHING.
- This is done to reduce database size, so that we don't write parameters to the DB that are related to
- interfaces that are not not active.
+ Parameters with defaults are not written to the database until they have been assigned
+ SINCE_ANYTHING. This is done to reduce database size, so that we don't write parameters to
+        the DB that are related to interfaces that are not active.
"""
for paramDef in parameters.ALL_DEFINITIONS.inCategory(self.name):
if paramDef.default not in (None, parameters.NoDefault):
@@ -507,11 +512,11 @@ def isRequestedDetailPoint(self, cycle=None, node=None):
Notes
-----
- By default, detail points are either during the requested snapshots,
- if any exist, or all cycles and nodes if none exist.
+ By default, detail points are either during the requested snapshots, if any exist, or all
+ cycles and nodes if none exist.
- This is useful for peripheral interfaces (CR Worth, perturbation theory, transients)
- that may or may not be requested during a standard run.
+ This is useful for peripheral interfaces (CR Worth, perturbation theory, transients) that
+ may or may not be requested during a standard run.
If both cycle and node are None, this returns True
@@ -557,12 +562,11 @@ def workerOperate(self, _cmd):
return False
def enabled(self, flag=None):
- r"""
+ """
Mechanism to allow interfaces to be attached but not running at the interaction points.
- Must be implemented on the individual interface level hooks.
- If given no arguments, returns status of enabled
- If arguments, sets enabled to that flag. (True or False)
+ Must be implemented on the individual interface level hooks. If given no arguments, returns
+        status of enabled. If arguments are given, sets enabled to that flag (True or False).
Notes
-----
@@ -576,7 +580,7 @@ def enabled(self, flag=None):
raise ValueError("Non-bool passed to assign {}.enable().".format(self))
def bolForce(self, flag=None):
- r"""
+ """
Run interactBOL even if this interface is disabled.
Parameters
@@ -610,26 +614,25 @@ def specifyInputs(cs) -> Dict[Union[str, settings.Setting], List[str]]:
"""
Return a collection of file names that are considered input files.
- This is a static method (i.e. is not called on a particular instance of the
- class), since it should not require an Interface to actually be constructed.
- This would require constructing a reactor object, which is expensive.
+ This is a static method (i.e. is not called on a particular instance of the class), since it
+ should not require an Interface to actually be constructed. This would require constructing
+ a reactor object, which is expensive.
- The files returned by an implementation should be those that one would want
- copied to a target location when cloning a Case or CaseSuite. These can be
- absolute paths, relative paths, or glob patterns that will be interpolated
- relative to the input directory. Absolute paths will not be copied anywhere.
+ The files returned by an implementation should be those that one would want copied to a
+ target location when cloning a Case or CaseSuite. These can be absolute paths, relative
+ paths, or glob patterns that will be interpolated relative to the input directory. Absolute
+ paths will not be copied anywhere.
- The returned dictionary will enable the source Settings object to
- be updated to the new file location. While the dictionary keys are
- recommended to be Setting objects, the name of the setting as a string,
- e.g., "shuffleLogic", is still interpreted. If the string name does not
+ The returned dictionary will enable the source Settings object to be updated to the new file
+ location. While the dictionary keys are recommended to be Setting objects, the name of the
+ setting as a string, e.g., "shuffleLogic", is still interpreted. If the string name does not
point to a valid setting then this will lead to a failure.
Note
----
- This existed before the advent of ARMI plugins. Perhaps it can be better served
- as a plugin hook. Potential future work.
+ This existed before the advent of ARMI plugins. Perhaps it can be better served as a plugin
+ hook. Potential future work.
See Also
--------
@@ -679,8 +682,8 @@ class OutputReader:
Notes
-----
- Should ideally not require r, eci, and fname arguments
- and would rather just have an apply(reactor) method.
+ Should ideally not require r, eci, and fname arguments and would rather just have an
+ apply(reactor) method.
"""
def __init__(self, r=None, externalCodeInterface=None, fName=None, cs=None):
diff --git a/armi/materials/inconel600.py b/armi/materials/inconel600.py
index a59ed78200..a700282a1b 100644
--- a/armi/materials/inconel600.py
+++ b/armi/materials/inconel600.py
@@ -39,8 +39,8 @@ class Inconel600(Material):
def __init__(self):
Material.__init__(self)
self.refDens = 8.47 # g/cc
- # Only density measurement presented in the reference.
- # Presumed to be performed at 21C since this was the reference temperature for linear expansion measurements.
+ # Only density measurement presented in the reference. Presumed to be performed at 21C since
+ # this was the reference temperature for linear expansion measurements.
def setDefaultMassFracs(self):
massFracs = {
@@ -202,11 +202,12 @@ def linearExpansion(self, Tk=None, Tc=None):
r"""
From http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf.
- Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100 to convert
- from percent strain to strain, then differentiated with respect to temperature to find the correlation
- for instantaneous linear expansion.
+ Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100
+ to convert from percent strain to strain, then differentiated with respect to temperature to
+ find the correlation for instantaneous linear expansion.
- i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion correlation is 2*a/100*Tc + b/100
+ i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion
+ correlation is 2*a/100*Tc + b/100
2*(3.722e-7/100.0)*Tc + 1.303e-3/100.0
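
The derivative relationship quoted in this docstring is easy to sanity-check symbolically; a
throwaway snippet (assumes sympy is available, which ARMI itself does not require):

    import sympy

    Tc, a, b, c = sympy.symbols("Tc a b c")
    strain = (a * Tc**2 + b * Tc + c) / 100  # linearExpansionPercent converted to strain
    print(sympy.diff(strain, Tc))  # Tc*a/50 + b/100, i.e. 2*a/100*Tc + b/100 as stated above
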
diff --git a/armi/materials/inconel625.py b/armi/materials/inconel625.py
index 1328c18ddd..5eabd9e3fa 100644
--- a/armi/materials/inconel625.py
+++ b/armi/materials/inconel625.py
@@ -231,11 +231,12 @@ def linearExpansion(self, Tk=None, Tc=None):
r"""
From http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf.
- Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100 to convert
- from percent strain to strain, then differentiated with respect to temperature to find the correlation
- for instantaneous linear expansion.
+ Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100
+ to convert from percent strain to strain, then differentiated with respect to temperature to
+ find the correlation for instantaneous linear expansion.
- i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion correlation is 2*a/100*Tc + b/100
+ i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion
+ correlation is 2*a/100*Tc + b/100
2*(5.083e-7/100.0)*Tc + 1.125e-3/100.0
diff --git a/armi/materials/inconelX750.py b/armi/materials/inconelX750.py
index b56e2e95d1..cf0d6997a9 100644
--- a/armi/materials/inconelX750.py
+++ b/armi/materials/inconelX750.py
@@ -40,7 +40,8 @@ def __init__(self):
Material.__init__(self)
self.refDens = 8.28 # g/cc
# Only density measurement presented in the reference.
- # Presumed to be performed at 21C since this was the reference temperature for linear expansion measurements.
+ # Presumed to be performed at 21C since this was the reference temperature for linear
+ # expansion measurements.
def setDefaultMassFracs(self):
massFracs = {
@@ -62,9 +63,9 @@ def setDefaultMassFracs(self):
def polyfitThermalConductivity(self, power=2):
r"""
- Calculates the coefficients of a polynomial fit for thermalConductivity.
- Based on data from http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf
- Fits a polynomial to the data set and returns the coefficients.
+ Calculates the coefficients of a polynomial fit for thermalConductivity. Based on data from
+        http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf. Fits a polynomial to
+        the data set and returns the coefficients.
Parameters
----------
@@ -234,11 +235,12 @@ def linearExpansion(self, Tk=None, Tc=None):
r"""
From http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf.
- Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100 to convert
- from percent strain to strain, then differentiated with respect to temperature to find the correlation
- for instantaneous linear expansion.
+ Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100
+ to convert from percent strain to strain, then differentiated with respect to temperature to
+ find the correlation for instantaneous linear expansion.
- i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion correlation is 2*a/100*Tc + b/100
+ i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion
+ correlation is 2*a/100*Tc + b/100
2*(6.8378e-7/100.0)*Tc + 1.056e-3/100.0
diff --git a/armi/materials/material.py b/armi/materials/material.py
index 9ba80ec59a..47a03a7c6b 100644
--- a/armi/materials/material.py
+++ b/armi/materials/material.py
@@ -40,21 +40,19 @@ class Material:
:id: I_ARMI_MAT_PROPERTIES
:implements: R_ARMI_MAT_PROPERTIES
- The ARMI Materials library is based on the Object-Oriented Programming design
- approach, and uses this generic ``Material`` base class. In this class we
- define a large number of material properties like density, heat capacity, or
- linear expansion coefficient. Specific materials then subclass this base class to
- assign particular values to those properties.
+ The ARMI Materials library is based on the Object-Oriented Programming design approach, and
+ uses this generic ``Material`` base class. In this class we define a large number of
+ material properties like density, heat capacity, or linear expansion coefficient. Specific
+ materials then subclass this base class to assign particular values to those properties.
.. impl:: Materials generate nuclide mass fractions at instantiation.
:id: I_ARMI_MAT_FRACS
:implements: R_ARMI_MAT_FRACS
- An ARMI material is meant to be able to represent real world materials that
- might be used in the construction of a nuclear reactor. As such, they are
- not just individual nuclides, but practical materials like a particular
- concrete, steel, or water. One of the main things that will be needed to
- describe such a material is the exact nuclide fractions. As such, the
+ An ARMI material is meant to be able to represent real world materials that might be used in
+ the construction of a nuclear reactor. As such, they are not just individual nuclides, but
+ practical materials like a particular concrete, steel, or water. One of the main things that
+ will be needed to describe such a material is the exact nuclide fractions. As such, the
constructor of every Material subclass attempts to set these mass fractions.
Attributes
@@ -64,13 +62,12 @@ class Material:
massFrac : dict
Mass fractions for all nuclides in the material keyed on the nuclide symbols
refDens : float
- A reference density used by some materials, for instance `SimpleSolid`s,
- during thermal expansion
+ A reference density used by some materials, for instance `SimpleSolid`s, during thermal
+ expansion
theoreticalDensityFrac : float
- Fraction of the material's density in reality, which is commonly different
- from 1.0 in solid materials due to the manufacturing process.
- Can often be set from the blueprints input via the TD_frac material modification.
- For programmatic setting, use `adjustTD()`.
+ Fraction of the material's density in reality, which is commonly different from 1.0 in solid
+ materials due to the manufacturing process. Can often be set from the blueprints input via
+ the TD_frac material modification. For programmatic setting, use `adjustTD()`.
Notes
-----
@@ -122,10 +119,10 @@ def name(self):
:id: I_ARMI_MAT_NAME
:implements: R_ARMI_MAT_NAME
- Every instance of an ARMI material must have a simple, human-readable
- string name. And, if possible, we want this string to match the class
- name. (This, of course, puts some limits on both the string and the
- class name.) These names are easily retrievable as a class property.
+ Every instance of an ARMI material must have a simple, human-readable string name. And,
+ if possible, we want this string to match the class name. (This, of course, puts some
+ limits on both the string and the class name.) These names are easily retrievable as a
+ class property.
"""
return self._name
@@ -135,8 +132,8 @@ def name(self, nomen):
Warning
-------
- Some code in ARMI expects the "name" of a meterial matches its
- class name. So you use this method at your own risk.
+        Some code in ARMI expects the "name" of a material to match its class name. So use this
+        method at your own risk.
See Also
--------
@@ -203,8 +200,7 @@ def linearExpansion(self, Tk: float = None, Tc: float = None) -> float:
"""
The instantaneous linear expansion coefficient (dL/L)/dT.
- This is used for reactivity coefficients, etc. but will not affect
- density or dimensions.
+ This is used for reactivity coefficients, etc. but will not affect density or dimensions.
See Also
--------
@@ -239,8 +235,7 @@ def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:
def linearExpansionFactor(self, Tc: float, T0: float) -> float:
"""
- Return a dL/L factor relative to T0 instead of the material-dependent reference
- temperature.
+ Return a dL/L factor relative to T0 instead of the material-dependent reference temperature.
Notes
-----
@@ -285,9 +280,8 @@ def setMassFrac(self, nucName: str, massFrac: float) -> None:
Notes
-----
- This will try to convert the provided ``massFrac`` into a float
- for assignment. If the conversion cannot occur then an error
- will be thrown.
+ This will try to convert the provided ``massFrac`` into a float for assignment. If the
+ conversion cannot occur then an error will be thrown.
"""
try:
massFrac = float(massFrac)
@@ -323,10 +317,11 @@ def adjustMassFrac(self, nuclideName: str, massFraction: float) -> None:
"""
Change the mass fraction of the specified nuclide.
- This adjusts the mass fraction of a specified nuclide relative to other nuclides of the same element. If there
- are no other nuclides within the element, then it is enriched relative to the entire material. For example,
- enriching U235 in UZr would enrich U235 relative to U238 and other naturally occurring uranium isotopes.
- Likewise, enriching ZR in UZr would enrich ZR relative to uranium.
+ This adjusts the mass fraction of a specified nuclide relative to other nuclides of the same
+ element. If there are no other nuclides within the element, then it is enriched relative to
+ the entire material. For example, enriching U235 in UZr would enrich U235 relative to U238
+ and other naturally occurring uranium isotopes. Likewise, enriching ZR in UZr would enrich
+ ZR relative to uranium.
The method maintains a constant number of atoms, and adjusts ``refDens`` accordingly.
@@ -365,16 +360,16 @@ def adjustMassFrac(self, nuclideName: str, massFraction: float) -> None:
if isinstance(
nuclideBases.byName[nuclideName], nuclideBases.NaturalNuclideBase
) or nuclideBases.isMonoIsotopicElement(nuclideName):
- # if there are not any other nuclides, assume we are enriching an entire element
- # consequently, allIndicesUpdated is no longer the element's indices, but the materials indices
+ # If there are not any other nuclides, assume we are enriching an entire element.
+ # Consequently, allIndicesUpdated is no longer the element's indices, but the
+                # material's indices.
allIndicesUpdated = range(len(nucsNames))
else:
raise ValueError( # could be warning if problematic
- "Nuclide {} was to be enriched in material {}, but there were no other isotopes of "
- "that element. Could not assume the enrichment of the entire element as there were "
- "other possible isotopes that did not exist in this material.".format(
- nuclideName, self
- )
+ "Nuclide {} was to be enriched in material {}, but there were no other "
+ "isotopes of that element. Could not assume the enrichment of the entire "
+ "element as there were other possible isotopes that did not exist in this "
+ "material.".format(nuclideName, self)
)
if massFraction == 1.0:
@@ -388,8 +383,8 @@ def adjustMassFrac(self, nuclideName: str, massFraction: float) -> None:
onlyOneOtherFracToDetermine = len(allIndicesUpdated) == 2
if not onlyOneOtherFracToDetermine:
raise ValueError(
- "Material {} has too many masses set to zero. cannot enrich {} to {}. Current "
- "mass fractions: {}".format(
+ "Material {} has too many masses set to zero. cannot enrich {} to {}. "
+ "Current mass fractions: {}".format(
self, nuclideName, massFraction, self.massFrac
)
)
@@ -500,9 +495,8 @@ def density(self, Tk: float = None, Tc: float = None) -> float:
Notes
-----
- Since refDens is specified at the material-dep reference case, we don't
- need to specify the reference temperature. It is already consistent with linearExpansion
- Percent.
+        Since refDens is specified at the material-dependent reference case, we don't need to
+        specify the reference temperature. It is already consistent with linearExpansionPercent.
- p*(dp/p(T) + 1) =p*( p + dp(T) )/p = p + dp(T) = p(T)
- dp/p = (1-(1 + dL/L)**3)/(1 + dL/L)**3
"""
@@ -577,15 +571,15 @@ def getMassFrac(
Notes
-----
- self.massFrac are modified mass fractions that may not add up to 1.0
- (for instance, after a axial expansion, the modified mass fracs will sum to less than one.
- The alternative is to put a multiplier on the density. They're mathematically equivalent.
+        self.massFrac are modified mass fractions that may not add up to 1.0 (for instance, after
+        an axial expansion, the modified mass fracs will sum to less than one). The alternative is
+        to put a multiplier on the density. They're mathematically equivalent.
- This function returns the normalized mass fraction (they will add to 1.0) as long as
- the mass fracs are modified only by get and setMassFrac
+        This function returns the normalized mass fractions (they will add to 1.0) as long as the
+        mass fracs are modified only by get and setMassFrac.
- This is a performance-critical method as it is called millions of times in a
- typical ARMI run.
+ This is a performance-critical method as it is called millions of times in a typical ARMI
+ run.
See Also
--------
@@ -626,7 +620,9 @@ def checkPropertyTempRange(self, label, val):
def checkTempRange(self, minT, maxT, val, label=""):
"""
- Checks if the given temperature (val) is between the minT and maxT temperature limits supplied.
+ Checks if the given temperature (val) is between the minT and maxT temperature limits
+ supplied.
+
Label identifies what material type or element is being evaluated in the check.
Parameters
@@ -683,13 +679,11 @@ def getNuclides(self):
Notes
-----
- This method is the only reason Materials still have self.parent.
- Essentially, we want to change that, but right now the logic for finding
- nuclides in the Reactor is recursive and considers Materials first.
- The bulk of the work in finally removing this method will come in
- downstream repos, where users have fully embraced this method and call
- it directly in many, many places.
- Please do not use this method, as it is being deprecated.
+ This method is the only reason Materials still have self.parent. Essentially, we want to
+ change that, but right now the logic for finding nuclides in the Reactor is recursive and
+ considers Materials first. The bulk of the work in finally removing this method will come in
+ downstream repos, where users have fully embraced this method and call it directly in many,
+ many places. Please do not use this method, as it is being deprecated.
"""
warnings.warn("Material.getNuclides is being deprecated.", DeprecationWarning)
return self.parent.getNuclides()
@@ -703,8 +697,9 @@ def getTempChangeForDensityChange(
deltaT = linearChange / linearExpansion
if not quiet:
runLog.info(
- f"The linear expansion for {self.getName()} at initial temperature of {Tc} C is {linearExpansion}.\n"
- f"A change in density of {(densityFrac - 1.0) * 100.0} percent at would require a change in temperature of {deltaT} C.",
+ f"The linear expansion for {self.getName()} at initial temperature of {Tc} C is "
+ f"{linearExpansion}.\nA change in density of {(densityFrac - 1.0) * 100.0} percent "
+ "at would require a change in temperature of {deltaT} C.",
single=True,
)
return deltaT
@@ -729,7 +724,9 @@ class Fluid(Material):
"""A material that fills its container. Could also be a gas."""
def getThermalExpansionDensityReduction(self, prevTempInC, newTempInC):
- """Return the factor required to update thermal expansion going from temperatureInC to temperatureInCNew."""
+ """Return the factor required to update thermal expansion going from temperatureInC to
+ temperatureInCNew.
+ """
rho0 = self.pseudoDensity(Tc=prevTempInC)
if not rho0:
return 1.0
@@ -744,10 +741,10 @@ def linearExpansion(self, Tk=None, Tc=None):
:id: I_ARMI_MAT_FLUID
:implements: R_ARMI_MAT_FLUID
- ARMI does not model thermal expansion of fluids. The ``Fluid`` superclass
- therefore sets the thermal expansion coefficient to zero. All fluids
- subclassing the ``Fluid`` material will inherit this method which sets the
- linear expansion coefficient to zero at all temperatures.
+ ARMI does not model thermal expansion of fluids. The ``Fluid`` superclass therefore sets
+ the thermal expansion coefficient to zero. All fluids subclassing the ``Fluid``
+ material will inherit this method which sets the linear expansion coefficient to zero at
+ all temperatures.
"""
return 0.0
@@ -761,8 +758,8 @@ def getTempChangeForDensityChange(
deltaT = tAtPerturbedDensity - Tc
if not quiet:
runLog.info(
- "A change in density of {} percent in {} at an initial temperature of {} C would require "
- "a change in temperature of {} C.".format(
+ "A change in density of {} percent in {} at an initial temperature of {} C would "
+ "require a change in temperature of {} C.".format(
(densityFrac - 1.0) * 100.0, self.getName(), Tc, deltaT
),
single=True,
@@ -815,9 +812,8 @@ def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:
Notes
-----
- This only method only works for Simple Solid Materials which assumes
- the density function returns 'free expansion' density as a function
- temperature
+        This method only works for Simple Solid Materials, which assumes the density function
+        returns 'free expansion' density as a function of temperature.
"""
density1 = self.density(Tk=self.refTempK)
density2 = self.density(Tk=Tk, Tc=Tc)
@@ -864,13 +860,13 @@ def applyInputParams(
Notes
-----
- This is often overridden to insert customized material modification parameters
- but then this parent should always be called at the end in case users want to
- use this style of custom input.
+ This is often overridden to insert customized material modification parameters but then this
+ parent should always be called at the end in case users want to use this style of custom
+ input.
- This is only applied to materials considered fuel so we don't apply these
- kinds of parameters to coolants and structural material, which are often
- not parameterized with any kind of enrichment.
+ This is only applied to materials considered fuel so we don't apply these kinds of
+ parameters to coolants and structural material, which are often not parameterized with any
+ kind of enrichment.
"""
if class1_wt_frac:
if not 0 <= class1_wt_frac <= 1:
@@ -891,8 +887,8 @@ def applyInputParams(
)
if class1_custom_isotopics == class2_custom_isotopics:
runLog.warning(
- "The custom isotopics specified for the class1/class2 materials"
- f" are both '{class1_custom_isotopics}'. You are not actually blending anything!"
+ "The custom isotopics specified for the class1/class2 materials are both "
+ f"'{class1_custom_isotopics}'. You are not actually blending anything!"
)
self.class1_wt_frac = class1_wt_frac
@@ -907,8 +903,8 @@ def _applyIsotopicsMixFromCustomIsotopicsInput(self, customIsotopics):
Only adjust heavy metal.
- This may also be needed for building charge assemblies during reprocessing, but
- will take input from the SFP rather than from the input external feeds.
+ This may also be needed for building charge assemblies during reprocessing, but will take
+ input from the SFP rather than from the input external feeds.
"""
class1Isotopics = customIsotopics[self.class1_custom_isotopics]
class2Isotopics = customIsotopics[self.class2_custom_isotopics]
diff --git a/armi/materials/mox.py b/armi/materials/mox.py
index f4fdfc191e..b2ab41700c 100644
--- a/armi/materials/mox.py
+++ b/armi/materials/mox.py
@@ -17,8 +17,9 @@
A definitive source for these properties is [#ornltm20002]_.
-.. [#ornltm20002] Thermophysical Properties of MOX and UO2 Fuels Including the Effects of Irradiation. S.G. Popov, et.al.
- Oak Ridge National Laboratory. ORNL/TM-2000/351 https://rsicc.ornl.gov/fmdp/tm2000-351.pdf
+.. [#ornltm20002] Thermophysical Properties of MOX and UO2 Fuels Including the Effects of
+    Irradiation. S.G. Popov, et al. Oak Ridge National Laboratory.
+ ORNL/TM-2000/351 https://rsicc.ornl.gov/fmdp/tm2000-351.pdf
"""
from armi import runLog
@@ -31,8 +32,8 @@ class MOX(UraniumOxide):
"""
MOX fuel.
- Some parameters (density, thermal conductivity, etc) are inherited from UraniumOxide.
- These parameters are sufficiently equivalent to pure UO2 in the literature to leave them unchanged.
+ Some parameters (density, thermal conductivity, etc) are inherited from UraniumOxide. These
+ parameters are sufficiently equivalent to pure UO2 in the literature to leave them unchanged.
Specific MOX mixtures may be defined in blueprints under custom isotopics.
"""
diff --git a/armi/materials/tZM.py b/armi/materials/tZM.py
index 84da845812..98a9542f74 100644
--- a/armi/materials/tZM.py
+++ b/armi/materials/tZM.py
@@ -22,9 +22,9 @@
class TZM(Material):
propertyValidTemperature = {"linear expansion percent": ((21.11, 1382.22), "C")}
references = {
- "linear expansion percent": "Report on the Mechanical and Thermal Properties of Tungsten and TZM Sheet Produced \
- in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau of Naval Weapons Contract No. N600(19)-59530, \
- Southern Research Institute"
+ "linear expansion percent": "Report on the Mechanical and Thermal Properties of Tungsten \
+ and TZM Sheet Produced in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau \
+ of Naval Weapons Contract No. N600(19)-59530, Southern Research Institute"
}
temperatureC = [
@@ -66,8 +66,8 @@ def setDefaultMassFracs(self):
self.setMassFrac("MO", 0.996711222)
def linearExpansionPercent(self, Tk=None, Tc=None):
- r"""
- return linear expansion in %dL/L from interpolation of tabular data.
+ """
+ Return linear expansion in %dL/L from interpolation of tabular data.
This function is used to expand a material from its reference temperature (21C)
to a particular hot temperature.
@@ -79,9 +79,9 @@ def linearExpansionPercent(self, Tk=None, Tc=None):
Tc : float
temperature in C
- Source: Report on the Mechanical and Thermal Properties of Tungsten and TZM Sheet Produced \
- in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau of Naval Weapons Contract No. N600(19)-59530, 1966 \
- Southern Research Institute.
+ Source: Report on the Mechanical and Thermal Properties of Tungsten and TZM Sheet Produced
+ in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau of Naval Weapons
+ Contract No. N600(19)-59530, 1966 Southern Research Institute.
See Table viii-b, Appendix B, page 181.
"""
diff --git a/armi/materials/tests/test_materials.py b/armi/materials/tests/test_materials.py
index aa670d9475..44bdd17ae1 100644
--- a/armi/materials/tests/test_materials.py
+++ b/armi/materials/tests/test_materials.py
@@ -1354,8 +1354,9 @@ def test_01_linearExpansionPercent(self):
for Tc, val in zip(TcList, refList):
cur = self.mat.linearExpansionPercent(Tc=Tc)
ref = val
- errorMsg = "\n\nIncorrect Inconel 600 linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
- cur, ref
+ errorMsg = (
+ "\n\nIncorrect Inconel 600 linearExpansionPercent(Tk=None,Tc=None)\n"
+ "Received:{}\nExpected:{}\n".format(cur, ref)
)
self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)
@@ -1375,8 +1376,9 @@ def test_02_linearExpansion(self):
for Tc, val in zip(TcList, refList):
cur = self.mat.linearExpansion(Tc=Tc)
ref = val
- errorMsg = "\n\nIncorrect Inconel 600 linearExpansion(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
- cur, ref
+ errorMsg = (
+ "\n\nIncorrect Inconel 600 linearExpansion(Tk=None,Tc=None)\nReceived:"
+ "{}\nExpected:{}\n".format(cur, ref)
)
self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)
@@ -1494,8 +1496,9 @@ def test_01_linearExpansionPercent(self):
for Tc, val in zip(TcList, refList):
cur = self.mat.linearExpansionPercent(Tc=Tc)
ref = val
- errorMsg = "\n\nIncorrect Inconel 625 linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
- cur, ref
+ errorMsg = (
+ "\n\nIncorrect Inconel 625 linearExpansionPercent(Tk=None,Tc=None)\n"
+ "Received:{}\nExpected:{}\n".format(cur, ref)
)
self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)
@@ -1632,8 +1635,9 @@ def test_01_linearExpansionPercent(self):
for Tc, val in zip(TcList, refList):
cur = self.mat.linearExpansionPercent(Tc=Tc)
ref = val
- errorMsg = "\n\nIncorrect Inconel X750 linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
- cur, ref
+ errorMsg = (
+ "\n\nIncorrect Inconel X750 linearExpansionPercent(Tk=None,Tc=None)\n"
+ "Received:{}\nExpected:{}\n".format(cur, ref)
)
self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)
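
One subtlety these errorMsg refactors rely on: adjacent string literals are concatenated at parse
time, so .format() applies to the joined string. A quick standalone check (illustrative values):

    msg = (
        "\n\nIncorrect example linearExpansion\n"
        "Received:{}\nExpected:{}\n".format(1.23, 4.56)
    )
    assert msg == "\n\nIncorrect example linearExpansion\nReceived:1.23\nExpected:4.56\n"
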
diff --git a/armi/materials/water.py b/armi/materials/water.py
index 8b44a23bdd..0cd35b66ed 100644
--- a/armi/materials/water.py
+++ b/armi/materials/water.py
@@ -21,6 +21,11 @@
from armi.utils import units
from armi.utils.units import getTk
+_REF_SR1_86 = (
+ "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and "
+ "Steam"
+)
+
class Water(Fluid):
"""
@@ -41,13 +46,13 @@ class Water(Fluid):
thermalScatteringLaws = (tsl.byNbAndCompound[nb.byName["H"], tsl.H2O],)
references = {
- "vapor pressure": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
- "enthalpy (saturated water)": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
- "enthalpy (saturated steam)": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
- "entropy (saturated water)": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
- "entropy (saturated steam)": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
- "density (saturated water)": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
- "density (saturated steam)": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
+ "vapor pressure": _REF_SR1_86,
+ "enthalpy (saturated water)": _REF_SR1_86,
+ "enthalpy (saturated steam)": _REF_SR1_86,
+ "entropy (saturated water)": _REF_SR1_86,
+ "entropy (saturated steam)": _REF_SR1_86,
+ "density (saturated water)": _REF_SR1_86,
+ "density (saturated steam)": _REF_SR1_86,
}
TEMPERATURE_CRITICAL_K = 647.096
diff --git a/armi/nucDirectory/nuclideBases.py b/armi/nucDirectory/nuclideBases.py
index 172342dc26..f0f2154cc6 100644
--- a/armi/nucDirectory/nuclideBases.py
+++ b/armi/nucDirectory/nuclideBases.py
@@ -356,8 +356,9 @@ def __init__(
"""
Create an instance of an INuclide.
- .. warning::
- Do not call this constructor directly; use the factory instead.
+ Warning
+ -------
+ Do not call this constructor directly; use the factory instead.
"""
if element not in elements.byName.values():
raise ValueError(
@@ -365,7 +366,8 @@ def __init__(
)
if state < 0:
raise ValueError(
- f"Error in initializing nuclide {name}. An invalid state {state} is provided. The state must be a positive integer."
+ f"Error in initializing nuclide {name}. An invalid state {state} is provided. The "
+ "state must be a positive integer."
)
if halflife < 0.0:
raise ValueError(
diff --git a/armi/nuclearDataIO/cccc/cccc.py b/armi/nuclearDataIO/cccc/cccc.py
index dd3133481c..dba29e5930 100644
--- a/armi/nuclearDataIO/cccc/cccc.py
+++ b/armi/nuclearDataIO/cccc/cccc.py
@@ -16,7 +16,8 @@
Defines containers for the reading and writing standard interface files
for reactor physics codes.
-.. impl:: Generic tool for reading and writing Committee on Computer Code Coordination (CCCC) format files for reactor physics codes
+.. impl:: Generic tool for reading and writing Committee on Computer Code Coordination (CCCC) format
+ files for reactor physics codes
:id: I_ARMI_NUCDATA
:implements: R_ARMI_NUCDATA_ISOTXS,
R_ARMI_NUCDATA_GAMISO,
@@ -25,24 +26,21 @@
R_ARMI_NUCDATA_PMATRX,
R_ARMI_NUCDATA_DLAYXS
- This module provides a number of base classes that implement general
- capabilities for binary and ASCII file I/O. The :py:class:`IORecord` serves
- as an abstract base class that instantiates a number of methods that the
- binary and ASCII children classes are meant to implement. These methods,
- prefixed with ``rw``, are meant to convert literal data types, e.g. float or
- int, to either binary or ASCII. This base class does its own conversion for
- container data types, e.g. list or matrix, relying on the child
- implementation of the literal types that the container possesses. The binary
- conversion is implemented in :py:class:`BinaryRecordReader` and
+ This module provides a number of base classes that implement general capabilities for binary and
+ ASCII file I/O. The :py:class:`IORecord` serves as an abstract base class that instantiates a
+ number of methods that the binary and ASCII children classes are meant to implement. These
+ methods, prefixed with ``rw``, are meant to convert literal data types, e.g. float or int, to
+ either binary or ASCII. This base class does its own conversion for container data types, e.g.
+ list or matrix, relying on the child implementation of the literal types that the container
+ possesses. The binary conversion is implemented in :py:class:`BinaryRecordReader` and
:py:class:`BinaryRecordWriter`. The ASCII conversion is implemented in
:py:class:`AsciiRecordReader` and :py:class:`AsciiRecordWriter`.
- These :py:class:`IORecord` classes are used within :py:class:`Stream` objects
- for the data conversion. :py:class:`Stream` is a context manager that opens
- a file for reading or writing on the ``__enter__`` and closes that file upon
- ``__exit__``. :py:class:`Stream` is an abstract base class that is
- subclassed for each CCCC file. It is subclassed directly for the CCCC files
- that contain cross-section data:
+ These :py:class:`IORecord` classes are used within :py:class:`Stream` objects for the data
+ conversion. :py:class:`Stream` is a context manager that opens a file for reading or writing on
+ the ``__enter__`` and closes that file upon ``__exit__``. :py:class:`Stream` is an abstract base
+ class that is subclassed for each CCCC file. It is subclassed directly for the CCCC files that
+ contain cross-section data:
* :py:class:`ISOTXS `
* :py:mod:`GAMISO `
@@ -50,32 +48,28 @@
* :py:class:`DLAYXS `
* :py:mod:`COMPXS `
- For the CCCC file types that are outputs from a flux solver such as DIF3D
- (e.g., GEODST, DIF3D, NHFLUX) the streams are subclassed from
- :py:class:`StreamWithDataContainer`, which is a special abstract subclass of
- :py:class:`Stream` that implements a common pattern used for these file
- types. In a :py:class:`StreamWithDataContainer`, the data is directly read
- to or written from a specialized data container.
+ For the CCCC file types that are outputs from a flux solver such as DIF3D (e.g., GEODST, DIF3D,
+ NHFLUX) the streams are subclassed from :py:class:`StreamWithDataContainer`, which is a special
+ abstract subclass of :py:class:`Stream` that implements a common pattern used for these file
+ types. In a :py:class:`StreamWithDataContainer`, the data is directly read to or written from a
+ specialized data container.
- The data container structure for each type of CCCC file is implemented in
- the module for that file, as a subclass of :py:class:`DataContainer`. The
- subclasses for each CCCC file type define standard attribute names for the
- data that will be read from or written to the CCCC file. CCCC file types
- that follow this pattern include:
+ The data container structure for each type of CCCC file is implemented in the module for that
+ file, as a subclass of :py:class:`DataContainer`. The subclasses for each CCCC file type define
+ standard attribute names for the data that will be read from or written to the CCCC file. CCCC
+ file types that follow this pattern include:
* :py:class:`GEODST `
* :py:class:`DIF3D `
- * :py:class:`NHFLUX `
- (and multiple sub-classes thereof)
+ * :py:class:`NHFLUX ` (and multiple sub-classes)
* :py:class:`LABELS `
* :py:class:`PWDINT `
* :py:class:`RTFLUX `
* :py:class:`RZFLUX `
* :py:class:`RTFLUX `
- The logic to parse or write each specific file format is contained within
- the :py:meth:`Stream.readWrite` implementations of the respective
- subclasses.
+ The logic to parse or write each specific file format is contained within the
+ :py:meth:`Stream.readWrite` implementations of the respective subclasses.
"""
import io
import itertools
diff --git a/armi/nuclearDataIO/cccc/dlayxs.py b/armi/nuclearDataIO/cccc/dlayxs.py
index f62cc37503..c9a2f2c4ab 100644
--- a/armi/nuclearDataIO/cccc/dlayxs.py
+++ b/armi/nuclearDataIO/cccc/dlayxs.py
@@ -13,11 +13,11 @@
# limitations under the License.
"""
-Module to read DLAYXS files, which contain delayed neutron precursor data, including decay constants and emission
-spectra.
+Module to read DLAYXS files, which contain delayed neutron precursor data, including decay constants
+and emission spectra.
-Similar to ISOTXS files, DLAYXS files are often created by a lattice physics code such as MC2 and used as input
-to a global flux solver such as DIF3D.
+Similar to ISOTXS files, DLAYXS files are often created by a lattice physics code such as MC2 and
+used as input to a global flux solver such as DIF3D.
This module implements reading and writing of the DLAYXS, consistent with [CCCC-IV]_.
"""
@@ -38,18 +38,20 @@ class DelayedNeutronData:
"""
Container of information about delayed neutron precursors.
- This info should be enough to perform point kinetics problems and to compute the delayed neutron fraction.
+ This info should be enough to perform point kinetics problems and to compute the delayed neutron
+ fraction.
- This object represents data related to either one nuclide (as read from a data library)
- or an average over many nuclides (as computed after a delayed-neutron fraction calculation).
+ This object represents data related to either one nuclide (as read from a data library) or an
+ average over many nuclides (as computed after a delayed-neutron fraction calculation).
- For a problem with P precursor groups and G energy groups, delayed neutron precursor information includes:
+ For a problem with P precursor groups and G energy groups, delayed neutron precursor information
+ includes:
Attributes
----------
precursorDecayConstants : array
- This is P-length list of decay constants in (1/s) that characterize the decay rates of the delayed
- neutron precursors. When a precursor decays, it emits a delayed neutron.
+        This is a P-length list of decay constants in (1/s) that characterize the decay rates of the
+ delayed neutron precursors. When a precursor decays, it emits a delayed neutron.
delayEmissionSpectrum : array
fraction of delayed neutrons emitted into each neutron energy group from each precursor family
@@ -58,10 +60,10 @@ class DelayedNeutronData:
Aka delayed-chi
delayNeutronsPerFission : array
- the multigroup number of delayed neutrons released per decay for each precursor group
- Note that this is equivalent to the number of delayed neutron precursors produced per fission in
- each family and energy group.
- Structure is identical to delayEmissionSpectrum. Aka delayed-nubar.
+ The multigroup number of delayed neutrons released per decay for each precursor group. Note
+ that this is equivalent to the number of delayed neutron precursors produced per fission in
+        each family and energy group. Structure is identical to delayEmissionSpectrum. Aka
+        delayed-nubar.
"""
def __init__(self, numEnergyGroups, numPrecursorGroups):
@@ -166,21 +168,11 @@ def G(self):
def generateAverageDelayedNeutronConstants(self):
"""
- Use externally-computed ``nuclideContributionFractions`` to produce an average ``DelayedNeutronData`` obj.
+ Use externally-computed ``nuclideContributionFractions`` to produce an average
+ ``DelayedNeutronData`` object.
- Solves typical averaging equation but weights already sum to 1.0 so we
- can skip normalization at the end.
-
- Notes
- -----
- Long ago, the DLAYXS file had the same constants for each nuclide (!?) and this method
- simply took the first. Later, it was updated to take an importance- and abundance-weighted
- average of the values on the DLAYXS library.
-
- A paper by Tuttle (1974) discusses some averaging but they end up saying that kinetics problems
- are mostly insensitive to the group constants ("errors of a few percent"). But in TWRs, we switch from U235 to Pu239
- and the difference may be important. We can try weighting by nuclide effective
- delayed neutron fractions beta_eff_nuclide/beta.
+ Solves typical averaging equation but weights already sum to 1.0 so we can skip
+ normalization at the end.
"""
avg = DelayedNeutronData(self.G, self.numPrecursorGroups)
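
The "typical averaging equation" referenced above is a weighted sum whose weights already total 1.0;
sketched generically with numpy and illustrative (made-up) numbers:

    import numpy as np

    contributionFractions = {"U235": 0.6, "PU239": 0.4}  # assumed to already sum to 1.0
    decayConstantsByNuclide = {
        "U235": np.array([0.0133, 0.0327, 0.1208, 0.3028, 0.8495, 2.853]),
        "PU239": np.array([0.0133, 0.0309, 0.1134, 0.2925, 0.8575, 2.729]),
    }
    avgDecayConstants = sum(
        frac * decayConstantsByNuclide[nuc] for nuc, frac in contributionFractions.items()
    )
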
diff --git a/armi/nuclearDataIO/cccc/gamiso.py b/armi/nuclearDataIO/cccc/gamiso.py
index af5e7f4933..30912fabcc 100644
--- a/armi/nuclearDataIO/cccc/gamiso.py
+++ b/armi/nuclearDataIO/cccc/gamiso.py
@@ -15,8 +15,8 @@
"""
Module for reading GAMISO files which contains gamma cross section data.
-GAMISO is a binary file created by MC**2-v3 that contains multigroup microscopic gamma cross sections. GAMISO data is
-contained within a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
+GAMISO is a binary file created by MC**2-v3 that contains multigroup microscopic gamma cross
+sections. GAMISO data is contained within a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
.. impl:: Tool to read and write GAMISO files.
:id: I_ARMI_NUCDATA_GAMISO
@@ -31,8 +31,9 @@
See [GAMSOR]_.
-.. [GAMSOR] Smith, M. A., Lee, C. H., and Hill, R. N. GAMSOR: Gamma Source Preparation and DIF3D Flux Solution. United States:
- N. p., 2016. Web. doi:10.2172/1343095. `On OSTI `_
+.. [GAMSOR] Smith, M. A., Lee, C. H., and Hill, R. N. GAMSOR: Gamma Source Preparation and DIF3D
+ Flux Solution. United States: N. p., 2016. Web. doi:10.2172/1343095. `On OSTI
+ `_
"""
from armi import runLog
@@ -80,8 +81,8 @@ def addDummyNuclidesToLibrary(lib, dummyNuclides):
Notes
-----
- Since MC2-3 does not write DUMMY nuclide information for GAMISO files, this is necessary to provide a
- consistent set of nuclide-level data across all the nuclides in a
+ Since MC2-3 does not write DUMMY nuclide information for GAMISO files, this is necessary to
+ provide a consistent set of nuclide-level data across all the nuclides in a
:py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
"""
if not dummyNuclides:
diff --git a/armi/nuclearDataIO/cccc/pmatrx.py b/armi/nuclearDataIO/cccc/pmatrx.py
index 9bdbe5f255..40e7061fe3 100644
--- a/armi/nuclearDataIO/cccc/pmatrx.py
+++ b/armi/nuclearDataIO/cccc/pmatrx.py
@@ -17,9 +17,10 @@
See [GAMSOR]_ and [MC23]_.
-.. [MC23] Lee, Changho, Jung, Yeon Sang, and Yang, Won Sik. MC2-3: Multigroup Cross Section Generation Code for Fast Reactor
- Analysis Nuclear. United States: N. p., 2018. Web. doi:10.2172/1483949.
- (`OSTI `_)
+.. [MC23] Lee, Changho, Jung, Yeon Sang, and Yang, Won Sik. MC2-3: Multigroup Cross Section
+ Generation Code for Fast Reactor Analysis Nuclear. United States: N. p., 2018. Web.
+ doi:10.2172/1483949. (`OSTI
+ `_)
"""
import traceback
@@ -148,12 +149,16 @@ def _read(fileName, fileMode):
def writeBinary(lib, fileName):
- """Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` object to a binary file."""
+ """Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary`
+ object to a binary file.
+ """
return _write(lib, fileName, "wb")
def writeAscii(lib, fileName):
- """Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` object to an ASCII file."""
+ """Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary`
+ object to an ASCII file.
+ """
return _write(lib, fileName, "w")
@@ -164,6 +169,7 @@ def _write(lib, fileName, fileMode):
def _readWrite(lib, fileName, fileMode, getNuclideFunc):
with PmatrxIO(fileName, lib, fileMode, getNuclideFunc) as rw:
rw.readWrite()
+
return lib
diff --git a/armi/nuclearDataIO/cccc/tests/test_dlayxs.py b/armi/nuclearDataIO/cccc/tests/test_dlayxs.py
index 929b7450bc..29d0fda9be 100644
--- a/armi/nuclearDataIO/cccc/tests/test_dlayxs.py
+++ b/armi/nuclearDataIO/cccc/tests/test_dlayxs.py
@@ -925,7 +925,8 @@ def _assertDC(self, nucName, endfProvidedData):
].precursorDecayConstants
self.assertTrue(numpy.allclose(dlayData, endfProvidedData, 1e-3))
except AssertionError:
- # this is reraised because generating the message might take some time to format all the data from the arrays
+ # this is reraised because generating the message might take some time to format all the
+ # data from the arrays
raise AssertionError(
"{} was different,\nexpected:{}\nactual:{}".format(
nucName, endfProvidedData, dlayData
@@ -935,7 +936,8 @@ def _assertDC(self, nucName, endfProvidedData):
pass
@unittest.skip(
- "All the delayNeutronsPerFission data from mcc-v3 does not agree, this may be because they are from ENDV/B VI.8."
+ "All the delayNeutronsPerFission data from mcc-v3 does not agree, this may be because they "
+ "are from ENDV/B VI.8."
)
def test_ENDFVII1NeutronsPerFission(self):
"""
@@ -1062,7 +1064,8 @@ def _assertNuDelay(self, nucName, endfProvidedData):
numpyData = numpy.array(endfProvidedData)
self.assertTrue(numpy.allclose(dlayData, numpyData, 1e-3))
except AssertionError:
- # this is reraised because generating the message might take some time to format all the data from the arrays
+ # this is reraised because generating the message might take some time to format all the
+ # data from the arrays
raise AssertionError(
"{} was different,\nexpected:{}\nactual:{}".format(
nucName, numpyData, dlayData
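
A note on the re-raise pattern in the two hunks above: the fully formatted message, which dumps whole arrays, is only worth building after the cheap ``allclose`` check has already failed. A minimal standalone sketch of that pattern (the helper name and data below are invented for illustration, not taken from the test):

import numpy

def assertClose(actual, expected):
    """Cheap check first; only format the verbose array dump on the failure path."""
    try:
        assert numpy.allclose(actual, expected, 1e-3)
    except AssertionError:
        raise AssertionError(f"was different,\nexpected:{expected}\nactual:{actual}")

assertClose(numpy.linspace(0.0, 1.0, 5), numpy.linspace(0.0, 1.0, 5))
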
diff --git a/armi/nuclearDataIO/xsCollections.py b/armi/nuclearDataIO/xsCollections.py
index ed1a70f6fb..b066700ff5 100644
--- a/armi/nuclearDataIO/xsCollections.py
+++ b/armi/nuclearDataIO/xsCollections.py
@@ -15,11 +15,12 @@
"""
Cross section collections contain cross sections for a single nuclide or region.
-Specifically, they are used as attributes of :py:class:`~armi.nuclearDataIO.xsNuclides.XSNuclide`, which
-then are combined as a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
+Specifically, they are used as attributes of :py:class:`~armi.nuclearDataIO.xsNuclides.XSNuclide`,
+which then are combined as a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
-These may represent microscopic or macroscopic neutron or photon cross sections. When they are macroscopic,
-they generally represent a whole region with many nuclides, though this is not required.
+These may represent microscopic or macroscopic neutron or photon cross sections. When they are
+macroscopic, they generally represent a whole region with many nuclides, though this is not
+required.
See Also
--------
@@ -542,6 +543,7 @@ def _computeRemovalXS(self):
self.macros.removal += columnSum - diags
+# ruff: noqa: E501
def computeBlockAverageChi(b, isotxsLib):
r"""
Return the block average total chi vector based on isotope chi vectors.
diff --git a/armi/operators/tests/test_operators.py b/armi/operators/tests/test_operators.py
index 581a2a9230..f4f1188bbf 100644
--- a/armi/operators/tests/test_operators.py
+++ b/armi/operators/tests/test_operators.py
@@ -412,7 +412,8 @@ def test_computeTightCouplingConvergence(self):
Notes
-----
- - Assertion #1: ensure that the convergence of Keff, eps, is greater than 1e-5 (the prescribed convergence criteria)
+ - Assertion #1: ensure that the convergence of Keff, eps, is greater than 1e-5 (the
+          prescribed convergence criterion)
- Assertion #2: ensure that eps is (prevIterKeff - currIterKeff)
"""
prevIterKeff = 0.9
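
To make the two assertions in the notes above concrete, here is a tiny standalone sketch of the quantities involved (the keff values are invented; this is not the ARMI test body):

# eps is the keff change between successive coupled iterations; the values are chosen
# so eps stays above the 1e-5 convergence criterion, i.e. not yet converged.
prevIterKeff = 0.9
currIterKeff = 1.0
eps = abs(currIterKeff - prevIterKeff)
assert eps == abs(prevIterKeff - currIterKeff)  # Assertion #2 analogue
assert eps > 1e-5                               # Assertion #1 analogue
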
diff --git a/armi/physics/fuelCycle/fuelHandlers.py b/armi/physics/fuelCycle/fuelHandlers.py
index e4f8f29864..aa1070a18a 100644
--- a/armi/physics/fuelCycle/fuelHandlers.py
+++ b/armi/physics/fuelCycle/fuelHandlers.py
@@ -852,7 +852,7 @@ def dischargeSwap(self, incoming, outgoing):
have the same number and same height of stationary blocks. If not, return an error.
If all checks pass, the :py:meth:`~armi.reactor.assemblies.Assembly.remove` and
- :py:meth:`~armi.reactor.assemblies.Assembly.insert`` methods are used to swap the
+ :py:meth:`~armi.reactor.assemblies.Assembly.insert` methods are used to swap the
stationary blocks between the two assemblies.
Once this process is complete, the actual assembly movement can take place. Through this
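
The dischargeSwap docstring above describes exchanging stationary blocks between the incoming and outgoing assemblies before the assemblies themselves move. A toy sketch of that idea, using plain lists instead of Assembly/Block objects (names and indices are invented, and this is not the FuelHandler implementation):

def swapStationaryBlocks(incoming, outgoing, stationaryIndices):
    """Exchange the blocks at the stationary positions between two block lists."""
    for i in stationaryIndices:
        incoming[i], outgoing[i] = outgoing[i], incoming[i]

incoming = ["fuel1", "fuel2", "grid_plate_new"]
outgoing = ["fuelA", "fuelB", "grid_plate_old"]
swapStationaryBlocks(incoming, outgoing, stationaryIndices=[2])
assert incoming[2] == "grid_plate_old" and outgoing[2] == "grid_plate_new"
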
diff --git a/armi/physics/fuelCycle/tests/test_fuelHandlers.py b/armi/physics/fuelCycle/tests/test_fuelHandlers.py
index c905d28d77..6e6bd73bfe 100644
--- a/armi/physics/fuelCycle/tests/test_fuelHandlers.py
+++ b/armi/physics/fuelCycle/tests/test_fuelHandlers.py
@@ -436,14 +436,8 @@ def runShuffling(self, fh):
fh.interactEOL()
def test_repeatShuffles(self):
- """Loads the ARMI test reactor with a custom shuffle logic file and shuffles assemblies twice.
-
- Notes
- -----
- The custom shuffle logic is executed by :py:meth:`armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.manageFuel`
- within :py:meth:`armi.physics.fuelCycle.tests.test_fuelHandlers.TestFuelHandler.runShuffling`. There are
- two primary assertions: spent fuel pool assemblies are in the correct location and the assemblies were shuffled
- into their correct locations. This process is repeated twice to ensure repeatability.
+ """Loads the ARMI test reactor with a custom shuffle logic file and shuffles assemblies
+ twice.
.. test:: Execute user-defined shuffle operations based on a reactor model.
:id: T_ARMI_SHUFFLE
@@ -452,6 +446,15 @@ def test_repeatShuffles(self):
.. test:: Move an assembly from one position in the core to another.
:id: T_ARMI_SHUFFLE_MOVE0
:tests: R_ARMI_SHUFFLE_MOVE
+
+ Notes
+ -----
+ The custom shuffle logic is executed by
+ :py:meth:`armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.manageFuel` in
+ :py:meth:`armi.physics.fuelCycle.tests.test_fuelHandlers.TestFuelHandler.runShuffling`.
+ There are two primary assertions: spent fuel pool assemblies are in the correct location and
+ the assemblies were shuffled into their correct locations. This process is repeated twice to
+ ensure repeatability.
"""
# check labels before shuffling:
for a in self.r.sfp.getChildren():
@@ -461,9 +464,9 @@ def test_repeatShuffles(self):
fh = self.r.o.getInterface("fuelHandler")
self.runShuffling(fh) # changes caseTitle
- # make sure the generated shuffles file matches the tracked one.
- # This will need to be updated if/when more assemblies are added to the test reactor
- # but must be done carefully. Do not blindly rebaseline this file.
+ # Make sure the generated shuffles file matches the tracked one. This will need to be
+ # updated if/when more assemblies are added to the test reactor but must be done carefully.
+ # Do not blindly rebaseline this file.
self.compareFilesLineByLine("armiRun-SHUFFLES.txt", "armiRun2-SHUFFLES.txt")
# store locations of each assembly
@@ -644,8 +647,7 @@ def test_transferStationaryBlocks(self):
def test_transferDifferentNumberStationaryBlocks(self):
"""
- Test the _transferStationaryBlocks method
- for the case where the input assemblies have
+ Test the _transferStationaryBlocks method for the case where the input assemblies have
different numbers of stationary blocks.
"""
# grab stationary block flags
@@ -674,8 +676,7 @@ def test_transferDifferentNumberStationaryBlocks(self):
def test_transferUnalignedLocationStationaryBlocks(self):
"""
- Test the _transferStationaryBlocks method
- for the case where the input assemblies have
+ Test the _transferStationaryBlocks method for the case where the input assemblies have
unaligned locations of stationary blocks.
"""
# grab stationary block flags
@@ -809,8 +810,7 @@ def test_dischargeSwap(self):
def test_dischargeSwapIncompatibleStationaryBlocks(self):
"""
- Test the _transferStationaryBlocks method
- for the case where the input assemblies have
+ Test the _transferStationaryBlocks method for the case where the input assemblies have
different numbers as well as unaligned locations of stationary blocks.
"""
# grab stationary block flags
diff --git a/armi/physics/fuelPerformance/settings.py b/armi/physics/fuelPerformance/settings.py
index 7a8e56cd08..ec7b6ac163 100644
--- a/armi/physics/fuelPerformance/settings.py
+++ b/armi/physics/fuelPerformance/settings.py
@@ -44,7 +44,10 @@ def defineSettings():
CONF_FGYF,
default=0.25,
label="Fission Gas Yield Fraction",
- description="The fraction of gaseous atoms produced per fission event, assuming a fission product yield of 2.0",
+ description=(
+ "The fraction of gaseous atoms produced per fission event, assuming a "
+ "fission product yield of 2.0"
+ ),
),
setting.Setting(
CONF_AXIAL_EXPANSION,
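
The CONF_FGYF description above implies a simple bit of arithmetic worth spelling out (illustrative only; the 2.0 fission product yield is the assumption stated in the description):

fissionProductYieldPerFission = 2.0  # total fission product atoms per fission (assumed)
fissionGasYieldFraction = 0.25       # the CONF_FGYF default above
gasAtomsPerFission = fissionGasYieldFraction * fissionProductYieldPerFission
assert abs(gasAtomsPerFission - 0.5) < 1e-12  # 0.5 gaseous atoms per fission
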
diff --git a/armi/physics/neutronics/crossSectionGroupManager.py b/armi/physics/neutronics/crossSectionGroupManager.py
index f93d24cd78..dc7f2cbd46 100644
--- a/armi/physics/neutronics/crossSectionGroupManager.py
+++ b/armi/physics/neutronics/crossSectionGroupManager.py
@@ -320,13 +320,13 @@ class AverageBlockCollection(BlockCollection):
:id: I_ARMI_XSGM_CREATE_REPR_BLOCKS0
:implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS
- This class constructs new blocks from an existing block list based on a
- volume-weighted average. Inheriting functionality from the abstract
- :py:class:`Reactor ` object, this class
- will construct representative blocks using averaged parameters of all blocks in the given collection.
- Number density averages can be computed at a component level
- or at a block level by default. Average nuclide temperatures and burnup are also included when constructing a representative block.
-
+ This class constructs new blocks from an existing block list based on a volume-weighted
+ average. Inheriting functionality from the abstract
+ :py:class:`Reactor `
+ object, this class will construct representative blocks using averaged parameters of all
+ blocks in the given collection. Number density averages can be computed at a component level
+ or at a block level by default. Average nuclide temperatures and burnup are also included
+ when constructing a representative block.
"""
def _makeRepresentativeBlock(self):
@@ -407,11 +407,11 @@ def _getAverageComponentTemperature(self, compIndex):
Notes
-----
- Weighting is both by the block weight within the collection and the relative mass of the component.
- The block weight is already scaled by the block volume, so we need to pull that out of the block
- weighting because it would effectively be double-counted in the component mass. b.getHeight()
- is proportional to block volume, so it is used here as a computationally cheaper proxy for scaling
- by block volume.
+ Weighting is both by the block weight within the collection and the relative mass of the
+ Component. The block weight is already scaled by the block volume, so we need to pull that
+ out of the block weighting because it would effectively be double-counted in the component
+ mass. b.getHeight() is proportional to block volume, so it is used here as a computationally
+ cheaper proxy for scaling by block volume.
Returns
-------
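
A hedged numerical sketch of the weighting described in the Notes above (block weights, heights, masses, and temperatures are all made up): each block weight is divided by the block height, a stand-in for volume, so the component mass is not double-counted, and the component temperature is then averaged with the combined weight.

blocks = [
    # (blockWeight, blockHeight_cm, componentMass_g, componentTemp_C)
    (10.0, 20.0, 50.0, 450.0),
    (30.0, 20.0, 55.0, 470.0),
]
numerator = sum(w / h * m * t for w, h, m, t in blocks)
denominator = sum(w / h * m for w, h, m, _ in blocks)
avgComponentTemp = numerator / denominator
print(f"weighted component temperature ~ {avgComponentTemp:.1f} C")
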
@@ -440,9 +440,8 @@ def _performAverageByComponent(self):
"""
Check if block collection averaging can/should be performed by component.
- If the components of blocks in the collection are similar and the user
- has requested component-level averaging, return True.
- Otherwise, return False.
+ If the components of blocks in the collection are similar and the user has requested
+ Component-level averaging, return True. Otherwise, return False.
"""
if not self.averageByComponent:
return False
@@ -453,9 +452,8 @@ def _checkBlockSimilarity(self):
"""
Check if blocks in the collection have similar components.
- If the components of blocks in the collection are similar and the user
- has requested component-level averaging, return True.
- Otherwise, return False.
+ If the components of blocks in the collection are similar and the user has requested
+ Component-level averaging, return True. Otherwise, return False.
"""
cFlags = dict()
for b in self.getCandidateBlocks():
@@ -481,8 +479,8 @@ def getBlockNuclideTemperatureAvgTerms(block, allNucNames):
This volume-weights the densities by component volume fraction.
- It's important to count zero-density nuclides (i.e. ones like AM242 that are expected to build up)
- as trace values at the proper component temperatures.
+ It's important to count zero-density nuclides (i.e. ones like AM242 that are expected to build
+ up) as trace values at the proper component temperatures.
"""
def getNumberDensitiesWithTrace(component, allNucNames):
@@ -513,29 +511,30 @@ def getNumberDensitiesWithTrace(component, allNucNames):
class CylindricalComponentsAverageBlockCollection(BlockCollection):
"""
- Creates a representative block for the purpose of cross section generation with a one-dimensional
- cylindrical model.
+    Creates a representative block for the purpose of cross section generation with a
+    one-dimensional cylindrical model.
.. impl:: Create representative blocks using custom cylindrical averaging.
:id: I_ARMI_XSGM_CREATE_REPR_BLOCKS1
:implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS
- This class constructs representative blocks based on a volume-weighted average
- using cylindrical blocks from an existing block list. Inheriting functionality from the abstract
- :py:class:`Reactor ` object, this class
- will construct representative blocks using averaged parameters of all blocks in the given collection.
- Number density averages are computed at a component level. Nuclide temperatures from a median block-average temperature
- are used and the average burnup is evaluated across all blocks in the block list.
+ This class constructs representative blocks based on a volume-weighted average using
+ cylindrical blocks from an existing block list. Inheriting functionality from the abstract
+ :py:class:`Reactor `
+ object, this class will construct representative blocks using averaged parameters of all
+ blocks in the given collection. Number density averages are computed at a component level.
+ Nuclide temperatures from a median block-average temperature are used and the average burnup
+ is evaluated across all blocks in the block list.
Notes
-----
- When generating the representative block within this collection, the geometry is checked
- against all other blocks to ensure that the number of components are consistent. This implementation
- is intended to be opinionated, so if a user attempts to put blocks that have geometric differences
+ When generating the representative block within this collection, the geometry is checked against
+ all other blocks to ensure that the number of components are consistent. This implementation is
+ intended to be opinionated, so if a user attempts to put blocks that have geometric differences
then this will fail.
- This selects a representative block based on the collection of candidates based on the
- median block average temperatures as an assumption.
+    This selects a representative block from the collection of candidates based on the median
+    Block-average temperature as an assumption.
"""
def _getNewBlock(self):
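
The docstring above says the representative block is chosen from the candidates using the median block-average temperature. A toy selection sketch under that reading (block names and temperatures are invented; this is not the ARMI implementation):

import statistics

candidateTemps = {"blockA": 440.0, "blockB": 455.0, "blockC": 480.0}
medianTemp = statistics.median(candidateTemps.values())
representative = min(candidateTemps, key=lambda name: abs(candidateTemps[name] - medianTemp))
assert representative == "blockB"
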
@@ -583,17 +582,18 @@ def _checkComponentConsistency(b, repBlock):
Raises
------
ValueError
- When the components in a candidate block do not align with
- the components in the representative block. This check includes component area, component multiplicity,
- and nuclide composition.
+ When the components in a candidate block do not align with the components in the
+ representative Block. This check includes component area, component multiplicity, and
+ nuclide composition.
"""
if len(b) != len(repBlock):
raise ValueError(
- f"Blocks {b} and {repBlock} have differing number "
- "of components and cannot be homogenized"
+ f"Blocks {b} and {repBlock} have differing number of components and cannot be "
+ "homogenized"
)
- # Using Fe-56 as a proxy for structure and Na-23 as proxy for coolant is undesirably SFR-centric
- # This should be generalized in the future, if possible
+
+ # TODO: Using Fe-56 as a proxy for structure and Na-23 as proxy for coolant is undesirably
+ # SFR-centric. This should be generalized in the future, if possible.
consistentNucs = {"PU239", "U238", "U235", "U234", "FE56", "NA23", "O16"}
for c, repC in zip(sorted(b), sorted(repBlock)):
compString = (
@@ -629,7 +629,9 @@ def _getAverageComponentNucs(self, components, bWeights):
return allNucNames, densities / totalWeight
def _orderComponentsInGroup(self, repBlock):
- """Order the components based on dimension and material type within the representative block."""
+ """Order the components based on dimension and material type within the representative
+ Block.
+ """
for b in self.getCandidateBlocks():
self._checkComponentConsistency(b, repBlock)
componentLists = [list(sorted(b)) for b in self.getCandidateBlocks()]
@@ -655,13 +657,13 @@ class SlabComponentsAverageBlockCollection(BlockCollection):
Notes
-----
- - Ignores lumped fission products since there is no foreseeable need for burn calculations in 1D slab geometry
- since it is used for low power neutronic validation.
- - Checks for consistent component dimensions for all blocks in a group and then creates a new block.
- - Iterates through components of all blocks and calculates component average number densities. This calculation
- takes the first component of each block, averages the number densities, and applies this to the number density
- to the representative block.
-
+    - Ignores lumped fission products since there is no foreseeable need for burn calculations in 1D
+      slab geometry, which is used for low power neutronic validation.
+ - Checks for consistent component dimensions for all blocks in a group and then creates a new
+ Block.
+ - Iterates through components of all blocks and calculates component average number densities.
+ This calculation takes the first component of each block, averages the number densities, and
+      applies this number density to the representative block.
"""
def _getNewBlock(self):
@@ -704,14 +706,16 @@ def _checkComponentConsistency(b, repBlock, components=None):
Raises
------
ValueError
- When the components in a candidate block do not align with
- the components in the representative block. This check includes component area, component multiplicity,
- and nuclide composition.
+ When the components in a candidate block do not align with the components in the
+ representative block. This check includes component area, component multiplicity, and
+ nuclide composition.
TypeError
When the shape of the component is not a rectangle.
- .. warning:: This only checks ``consistentNucs`` for ones that are important in ZPPR and BFS.
+ Warning
+ -------
+ This only checks ``consistentNucs`` for ones that are important in ZPPR and BFS.
"""
comps = b if components is None else components
@@ -770,7 +774,8 @@ def _removeLatticeComponents(repBlock):
Notes
-----
- - This component does not serve any purpose for XS generation as it contains void material with zero area.
+ - This component does not serve any purpose for XS generation as it contains void material
+ with zero area.
- Removing this component does not modify the blocks within the reactor.
"""
for c in repBlock.iterComponents():
@@ -960,7 +965,7 @@ def interactCoupled(self, iteration):
See Also
--------
- :py:meth:`Assembly `
+ :py:meth:`~armi.physics.neutronics.latticePhysics.latticePhysics.LatticePhysicsInterface.interactCoupled`
"""
if (
iteration == 0
diff --git a/armi/physics/neutronics/fissionProductModel/fissionProductModelSettings.py b/armi/physics/neutronics/fissionProductModel/fissionProductModelSettings.py
index f42669afcb..7b28e77cc3 100644
--- a/armi/physics/neutronics/fissionProductModel/fissionProductModelSettings.py
+++ b/armi/physics/neutronics/fissionProductModel/fissionProductModelSettings.py
@@ -31,13 +31,16 @@ def defineSettings():
default="infinitelyDilute",
label="Fission Product Model",
description=(
- "This setting is used to determine how fission products are treated in an analysis. "
- "By choosing `noFissionProducts`, no fission products will be added. By selecting, `infinitelyDilute`, "
- "lumped fission products will be initialized to a very small number on the blocks/components that require them. "
- "By choosing `MO99`, the fission products will be represented only by Mo-99. This is a simplistic assumption that "
- "is commonly used by fast reactor analyses in scoping calculations and is not necessarily a great assumption for "
- "depletion evaluations. Finally, by choosing `explicitFissionProducts` the fission products will be added explicitly "
- "to the blocks/components that are depletable. This is useful for detailed tracking of fission products."
+ "This setting is used to determine how fission products are treated in an "
+ "analysis. By choosing `noFissionProducts`, no fission products will be added. By "
+ "selecting, `infinitelyDilute`, lumped fission products will be initialized to a "
+ "very small number on the blocks/components that require them. By choosing `MO99`, "
+ "the fission products will be represented only by Mo-99. This is a simplistic "
+ "assumption that is commonly used by fast reactor analyses in scoping calculations "
+ "and is not necessarily a great assumption for depletion evaluations. Finally, by "
+ "choosing `explicitFissionProducts` the fission products will be added explicitly "
+ "to the blocks/components that are depletable. This is useful for detailed tracking "
+ "of fission products."
),
options=[
"noFissionProducts",
@@ -51,16 +54,13 @@ def defineSettings():
default="",
label="Fission Product Library",
description=(
- f"This setting can used when the `{CONF_FP_MODEL}` setting "
- "is set to `explicitFissionProducts` and is used to configure "
- "all the nuclides that should be modeled within the core. "
- "Setting this is equivalent to adding all nuclides in the "
- "selected code library (i.e., MC2-3) within the blueprints "
- "`nuclideFlags` to be [xs:true, burn:false]. This option acts "
- "as a short-cut so that analysts do not need to change their "
- "inputs when modifying the fission product treatment for "
- "calculations. This may be extended for other cross section "
- "generation codes."
+                f"This setting can be used when the `{CONF_FP_MODEL}` setting is set to "
+ "`explicitFissionProducts` and is used to configure all the nuclides that should "
+ "be modeled within the core. Setting this is equivalent to adding all nuclides in "
+ "the selected code library (i.e., MC2-3) within the blueprints `nuclideFlags` to "
+ "be [xs:true, burn:false]. This option acts as a short-cut so that analysts do not "
+ "need to change their inputs when modifying the fission product treatment for "
+ "calculations. This may be extended for other cross section generation codes."
),
options=[
"",
@@ -72,9 +72,9 @@ def defineSettings():
default=False,
label="Use Independent LFPs",
description=(
- "Flag to make all blocks have independent lumped fission products. Note that this is forced to be True "
- "when the ``explicitFissionProducts`` modeling option is selected or an interface named `mcnp` is "
- "on registered on the operator stack."
+ "Flag to make all blocks have independent lumped fission products. Note that this "
+ "is forced to be True when the `explicitFissionProducts` modeling option is "
+                "selected or an interface named `mcnp` is registered on the operator stack."
),
),
setting.Setting(
@@ -82,9 +82,9 @@ def defineSettings():
default=fissionProductModel.REFERENCE_LUMPED_FISSION_PRODUCT_FILE,
label="LFP Definition File",
description=(
- "Path to the file that contains lumped fission product composition "
- "definitions (e.g. equilibrium yields). This is unused when the "
- "`explicitFissionProducts` or `MO99` modeling options are selected."
+ "Path to the file that contains lumped fission product composition definitions "
+ "(e.g. equilibrium yields). This is unused when the `explicitFissionProducts` or "
+ "`MO99` modeling options are selected."
),
),
]
@@ -103,8 +103,8 @@ def getFissionProductModelSettingValidators(inspector):
lambda: inspector.cs[CONF_FP_MODEL] != "explicitFissionProducts"
and not bool(inspector.cs["initializeBurnChain"]),
(
- "The burn chain is not being initialized and the fission product model is not set to `explicitFissionProducts`. "
- "This will likely fail."
+ "The burn chain is not being initialized and the fission product model is not set "
+ "to `explicitFissionProducts`. This will likely fail."
),
f"Would you like to set the `{CONF_FP_MODEL}` to `explicitFissionProducts`?",
lambda: inspector._assignCS(CONF_FP_MODEL, "explicitFissionProducts"),
@@ -116,8 +116,9 @@ def getFissionProductModelSettingValidators(inspector):
lambda: inspector.cs[CONF_FP_MODEL] != "explicitFissionProducts"
and inspector.cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] != "",
(
- "The explicit fission product model is disabled and the fission product model library is set. This will have no "
- f"impact on the results, but it is best to disable the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option."
+ "The explicit fission product model is disabled and the fission product model "
+ "library is set. This will have no impact on the results, but it is best to "
+ f"disable the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option."
),
"Would you like to do this?",
lambda: inspector._assignCS(CONF_FISSION_PRODUCT_LIBRARY_NAME, ""),
@@ -129,8 +130,8 @@ def getFissionProductModelSettingValidators(inspector):
lambda: inspector.cs[CONF_FP_MODEL] == "explicitFissionProducts"
and bool(inspector.cs["initializeBurnChain"]),
(
- "The explicit fission product model is enabled, but initializing the burn chain is also enabled. This will "
- "likely fail."
+ "The explicit fission product model is enabled, but initializing the burn chain is "
+ "also enabled. This will likely fail."
),
"Would you like to disable the burn chain initialization?",
lambda: inspector._assignCS("initializeBurnChain", False),
@@ -142,10 +143,14 @@ def getFissionProductModelSettingValidators(inspector):
lambda: inspector.cs[CONF_FP_MODEL] == "explicitFissionProducts"
and inspector.cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] == "",
(
- "The explicit fission product model is enabled and the fission product model library is disabled. May result in "
- "no fission product nuclides being added to the case, unless these have manually added in `nuclideFlags`."
+ "The explicit fission product model is enabled and the fission product model "
+ "library is disabled. May result in no fission product nuclides being added to the "
+                    "case, unless these have been manually added in `nuclideFlags`."
+ ),
+ (
+ f"Would you like to set the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option to be "
+                    "equal to the default implementation of MC2-3?"
),
- f"Would you like to set the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option to be equal to the default implementation of MC2-3?.",
lambda: inspector._assignCS(CONF_FISSION_PRODUCT_LIBRARY_NAME, "MC2-3"),
)
)
diff --git a/armi/physics/neutronics/globalFlux/globalFluxInterface.py b/armi/physics/neutronics/globalFlux/globalFluxInterface.py
index 1b202f91c1..3f2272b7a2 100644
--- a/armi/physics/neutronics/globalFlux/globalFluxInterface.py
+++ b/armi/physics/neutronics/globalFlux/globalFluxInterface.py
@@ -12,7 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""The Global flux interface provide a base class for all neutronics tools that compute the neutron and/or photon flux."""
+"""The Global flux interface provides a base class for all neutronics tools that compute the
+neutron and/or photon flux.
+"""
import math
from typing import Dict, Optional
@@ -1029,7 +1031,8 @@ def updateFluenceAndDpa(self, stepTimeInSeconds, blockList=None):
b.p.percentBuPeak + peakRatePerSecond * stepTimeInSeconds
)
else:
- # No rate, make bad assumption.... assumes peaking is same at each position through shuffling/irradiation history...
+                # No rate; make a bad assumption: assume peaking is the same at each position
+                # through shuffling/irradiation history.
runLog.warning(
"Scaling burnup by current peaking factor... This assumes peaking "
"factor was constant through shuffling/irradiation history.",
@@ -1045,7 +1048,7 @@ def updateFluenceAndDpa(self, stepTimeInSeconds, blockList=None):
self.updateLoadpadDose()
def updateCycleDoseParams(self):
- r"""Updates reactor params based on the amount of dose (detailedDpa) accrued this cycle.
+ """Updates reactor params based on the amount of dose (detailedDpa) accrued this cycle.
Params updated include:
@@ -1135,7 +1138,6 @@ def updateLoadpadDose(self):
See Also
--------
_calcLoadPadDose : computes the load pad dose
-
"""
peakPeak, peakAvg = self._calcLoadPadDose()
if peakPeak is None:
@@ -1165,8 +1167,7 @@ def _calcLoadPadDose(self):
loadPadLength : float
The axial length of the load pad to average over
- This builds axial splines over the assemblies and then integrates them
- over the load pad.
+ This builds axial splines over the assemblies and then integrates them over the load pad.
The assumptions are that detailedDpa is the average, defined in the center
and detailedDpaPeak is the peak, also defined in the center of blocks.
@@ -1187,7 +1188,6 @@ def _calcLoadPadDose(self):
--------
writeLoadPadDoseSummary : prints out the dose
Assembly.getParamValuesAtZ : gets the parameters at any arbitrary z point
-
"""
loadPadBottom = self.options.loadPadElevation
loadPadLength = self.options.loadPadLength
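
The _calcLoadPadDose docstring above describes building axial interpolants from block-center dose values and averaging them over the load pad. A self-contained sketch of that idea (elevations, doses, and the load pad span are invented, and simple linear interpolation stands in for the axial splines):

import numpy

blockCenters = numpy.array([10.0, 30.0, 50.0, 70.0])  # cm, block axial centers
detailedDpa = numpy.array([1.0, 4.0, 6.0, 3.0])       # dose values defined at those centers

loadPadBottom, loadPadLength = 25.0, 20.0             # cm, analogous to the options above
z = numpy.linspace(loadPadBottom, loadPadBottom + loadPadLength, 200)
avgDoseOverLoadPad = numpy.interp(z, blockCenters, detailedDpa).mean()
print(f"load pad average dose ~ {avgDoseOverLoadPad:.2f} dpa")
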
@@ -1326,7 +1326,6 @@ def calcReactionRates(obj, keff, lib):
lib : XSLibrary
Microscopic cross sections to use in computing the reaction rates.
-
.. impl:: Return the reaction rates for a given ArmiObject
:id: I_ARMI_FLUX_RX_RATES
:implements: R_ARMI_FLUX_RX_RATES
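
calcReactionRates, documented above, folds microscopic cross sections with the multigroup flux. The underlying sum is rate_x = N * sum_g(sigma_x,g * phi_g), with N in atoms/(b*cm), sigma in barns, and phi in n/(cm^2*s), giving reactions/(cm^3*s). A schematic one-nuclide example (all numbers below are invented):

import numpy

numberDensity = 0.02                         # atoms/(b*cm), hypothetical
sigmaFission = numpy.array([1.8, 1.2, 2.5])  # barns per group, hypothetical
mgFlux = numpy.array([1e14, 5e14, 2e14])     # n/(cm^2*s) per group, hypothetical

fissionRate = numberDensity * numpy.dot(sigmaFission, mgFlux)
print(f"fission rate ~ {fissionRate:.3e} reactions/(cm^3*s)")
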
diff --git a/armi/physics/neutronics/globalFlux/tests/test_globalFluxInterface.py b/armi/physics/neutronics/globalFlux/tests/test_globalFluxInterface.py
index 5b34fbdd16..57f90a70df 100644
--- a/armi/physics/neutronics/globalFlux/tests/test_globalFluxInterface.py
+++ b/armi/physics/neutronics/globalFlux/tests/test_globalFluxInterface.py
@@ -306,7 +306,8 @@ def test_executerInteractionNonUniformAssems(self, mockConverterFactory):
This will serve as a broad end-to-end test of the interface, and also
stress test the mesh issues with non-uniform assemblies.
- .. test:: Run the global flux interface to show the geometry converter is called when the nonuniform mesh option is used.
+ .. test:: Run the global flux interface to show the geometry converter is called when the
+ nonuniform mesh option is used.
:id: T_ARMI_FLUX_GEOM_TRANSFORM_CONV
:tests: R_ARMI_FLUX_GEOM_TRANSFORM
"""
@@ -335,8 +336,8 @@ class TestGlobalFluxResultMapper(unittest.TestCase):
"""
def test_mapper(self):
- # Switch to MC2v2 setting to make sure the isotopic/elemental expansions are compatible
- # with actually doing some math using the ISOAA test microscopic library
+ # Switch to MC2v2 setting to make sure the isotopic/elemental expansions are compatible with
+ # actually doing some math using the ISOAA test microscopic library
o, r = test_reactors.loadTestReactor(customSettings={CONF_XS_KERNEL: "MC2v2"})
applyDummyFlux(r)
r.core.lib = isotxs.readBinary(ISOAA_PATH)
@@ -425,11 +426,11 @@ def test_calcReactionRates(self):
"""
Test that the reaction rate code executes and sets a param > 0.0.
+ TODO: This could validate the reaction rate calculation.
+
.. test:: Return the reaction rates for a given ArmiObject.
:id: T_ARMI_FLUX_RX_RATES
:tests: R_ARMI_FLUX_RX_RATES
-
- .. warning:: This does not validate the reaction rate calculation.
"""
b = test_blocks.loadTestBlock()
test_blocks.applyDummyData(b)
@@ -442,7 +443,7 @@ def test_calcReactionRates(self):
def applyDummyFlux(r, ng=33):
- """Set arbitrary flux distribution on reactor."""
+ """Set arbitrary flux distribution on a Reactor."""
for b in r.core.getBlocks():
b.p.power = 1.0
b.p.mgFlux = numpy.arange(ng, dtype=numpy.float64)
diff --git a/armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py b/armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py
index 57a591d7e9..82be2af188 100644
--- a/armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py
+++ b/armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py
@@ -69,7 +69,7 @@ def isDepletable(obj: composites.ArmiObject):
class AbstractIsotopicDepleter:
- r"""
+ """
Interact with a depletion code.
This interface and subClasses deplete under a flux defined outside this
@@ -123,7 +123,7 @@ def getToDeplete(self):
return list(self._depleteByName.values())
def run(self):
- r"""
+ """
Submit depletion case with external solver to the cluster.
In addition to running the physics kernel, this method calls the waitForJob method
@@ -154,14 +154,12 @@ def makeXsecTable(
a list of the nucNames of active isotopes
isotxs: isotxs object
headerFormat: string (optional)
- this is the format in which the elements of the header with be returned
- -- i.e. if you use a .format() call with the case name you'll return a
- formatted list of string elements
+        this is the format in which the elements of the header will be returned -- i.e. if you use a
+ .format() call with the case name you'll return a formatted list of string elements
tableFormat: string (optional)
- this is the format in which the elements of the table with be returned
- -- i.e. if you use a .format() call with mcnpId, nG, nF, n2n, n3n, nA,
- and nP you'll get the format you want. If you use a .format() call with the case name you'll return a
- formatted list of string elements
+        This is the format in which the elements of the table will be returned -- i.e. if you use a
+ .format() call with mcnpId, nG, nF, n2n, n3n, nA, and nP you'll get the format you want. If
+ you use a .format() call with the case name you'll return a formatted list of strings.
Results
-------
@@ -196,10 +194,12 @@ def makeXsecTable(
class AbstractIsotopicDepletionReader(interfaces.OutputReader):
- r"""Read number density output produced by the isotopic depletion."""
+ """Read number density output produced by the isotopic depletion."""
def read(self):
- r"""Read a isotopic depletion Output File and applies results to armi objects in the ``ToDepletion`` attribute."""
+        """Read an isotopic depletion output file and apply results to armi objects in the
+ ``ToDepletion`` attribute.
+ """
raise NotImplementedError
@@ -209,8 +209,8 @@ class Csrc:
Notes
-----
- The chemical vector is a dictionary of chemicals and their removal rate
- constant -- this works like a decay constant.
+ The chemical vector is a dictionary of chemicals and their removal rate constant -- this works
+ like a decay constant.
The isotopic vector is used to make a source material in continuous source definitions.
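
The Csrc notes above say the chemical vector maps chemicals to removal rate constants that behave like decay constants. A small illustration of that reading (the chemicals and rate constants are invented), using N(t) = N0 * exp(-lambda * t):

import math

chemicalVector = {"CS": 1.0e-6, "I": 5.0e-7}  # removal rate constants, 1/s (hypothetical)
n0, elapsed = 1.0e20, 3600.0                  # initial atoms and elapsed seconds
remainingCs = n0 * math.exp(-chemicalVector["CS"] * elapsed)
print(f"Cs remaining after one hour ~ {remainingCs:.3e} atoms")
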
diff --git a/armi/physics/neutronics/parameters.py b/armi/physics/neutronics/parameters.py
index 269598980a..013e86d7b6 100644
--- a/armi/physics/neutronics/parameters.py
+++ b/armi/physics/neutronics/parameters.py
@@ -15,8 +15,8 @@
"""
Parameter definitions for the Neutronics Plugin.
-We hope neutronics plugins that compute flux will use ``mgFlux``, etc.,
-which will enable modular construction of apps.
+We hope neutronics plugins that compute flux will use ``mgFlux``, etc., which will enable modular
+construction of apps.
"""
from armi.physics.neutronics.settings import CONF_DPA_PER_FLUENCE
from armi.reactor import parameters
@@ -144,7 +144,10 @@ def _getNeutronicsBlockParams():
pb.defParam(
"mgFluxSK",
units=f"n*{units.CM}/{units.SECONDS}",
- description="multigroup volume-integrated flux stored for multiple time steps in spatial kinetics (2-D array)",
+ description=(
+ "multigroup volume-integrated flux stored for multiple time steps in "
+ "spatial kinetics (2-D array)"
+ ),
location=ParamLocation.VOLUME_INTEGRATED,
saveToDB=False,
categories=[
@@ -160,9 +163,9 @@ def _getNeutronicsBlockParams():
"pinMgFluxes",
units=f"n/{units.CM}^2/{units.SECONDS}",
description="""
- The block-level pin multigroup fluxes. pinMgFluxes[g][i] represents the flux in group g for pin i. Flux
- units are the standard n/cm^2/s. The "ARMI pin ordering" is used, which is counter-clockwise from 3
- o'clock.
+ The block-level pin multigroup fluxes. pinMgFluxes[g][i] represents the flux in group g
+ for pin i. Flux units are the standard n/cm^2/s. The "ARMI pin ordering" is used, which
+ is counter-clockwise from 3 o'clock.
""",
categories=[parameters.Category.pinQuantities],
saveToDB=True,
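
The description above fixes the layout of pinMgFluxes: the first index is the energy group and the second is the pin, counted counter-clockwise from 3 o'clock. A tiny indexing example (the array contents are arbitrary):

import numpy

nGroups, nPins = 3, 7
pinMgFluxes = numpy.arange(nGroups * nPins, dtype=float).reshape(nGroups, nPins)
g, i = 1, 4
print(f"flux in group {g} for pin {i}: {pinMgFluxes[g][i]:.1f} n/cm^2/s")
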
@@ -248,12 +251,13 @@ def _getNeutronicsBlockParams():
"linPow",
units=f"{units.WATTS}/{units.METERS}",
description=(
- "Pin-averaged linear heat rate, which is calculated by evaluating the block power and dividing "
- "by the number of pins. If gamma transport is enabled, then this represents the combined "
- "neutron and gamma heating. If gamma transport is disabled then this represents the energy "
- "generation in the pin, where gammas are assumed to deposit their energy locally. Note that this "
- "value does not implicitly account for axial and radial peaking factors within the block. Use `linPowByPin` "
- "for obtaining the pin linear heat rate with peaking factors included."
+ "Pin-averaged linear heat rate, which is calculated by evaluating the block power "
+ "and dividing by the number of pins. If gamma transport is enabled, then this "
+ "represents the combined neutron and gamma heating. If gamma transport is disabled "
+ "then this represents the energy generation in the pin, where gammas are assumed to "
+ "deposit their energy locally. Note that this value does not implicitly account "
+ "for axial and radial peaking factors within the block. Use `linPowByPin` for "
+ "obtaining the pin linear heat rate with peaking factors included."
),
location=ParamLocation.AVERAGE,
default=0.0,
@@ -270,9 +274,9 @@ def _getNeutronicsBlockParams():
description=(
"Pin linear linear heat rate, which is calculated through flux reconstruction and "
"accounts for axial and radial peaking factors. This differs from the `linPow` "
- "parameter, which assumes no axial and radial peaking in the block as this information "
- "is unavailable without detailed flux reconstruction. The same application of neutron and gamma "
- "heating results applies."
+ "parameter, which assumes no axial and radial peaking in the block as this "
+ "information is unavailable without detailed flux reconstruction. The same "
+ "application of neutron and gamma heating results applies."
),
location=ParamLocation.CHILDREN,
categories=[parameters.Category.pinQuantities],
@@ -673,7 +677,10 @@ def _getNeutronicsBlockParams():
"detailedDpaThisCycle",
units=units.DPA,
location=ParamLocation.AVERAGE,
- description="Displacement per atom accumulated during this cycle. This accumulates over a cycle and resets to zero at BOC.",
+ description=(
+ "Displacement per atom accumulated during this cycle. This accumulates "
+ "over a cycle and resets to zero at BOC."
+ ),
categories=[
parameters.Category.cumulativeOverCycle,
parameters.Category.detailedAxialExpansion,
@@ -691,7 +698,10 @@ def _getNeutronicsBlockParams():
pb.defParam(
"dpaPeakFromFluence",
units=units.DPA,
- description=f"DPA approximation based on a fluence conversion factor set in the {CONF_DPA_PER_FLUENCE} setting",
+ description=(
+ "DPA approximation based on a fluence conversion factor set in the "
+ f"{CONF_DPA_PER_FLUENCE} setting"
+ ),
location=ParamLocation.MAX,
categories=[
parameters.Category.cumulative,
@@ -724,7 +734,10 @@ def _getNeutronicsBlockParams():
pb.defParam(
"pdensGenerated",
units=f"{units.WATTS}/{units.CM}^3",
- description="Volume-averaged generated power density. Different than b.p.pdens only when gamma transport is activated.",
+ description=(
+ "Volume-averaged generated power density. Different than b.p.pdens only "
+ "when gamma transport is activated."
+ ),
location=ParamLocation.AVERAGE,
categories=[parameters.Category.gamma],
)
diff --git a/armi/physics/neutronics/tests/test_crossSectionManager.py b/armi/physics/neutronics/tests/test_crossSectionManager.py
index dc4f803797..1e73d6c668 100644
--- a/armi/physics/neutronics/tests/test_crossSectionManager.py
+++ b/armi/physics/neutronics/tests/test_crossSectionManager.py
@@ -258,7 +258,8 @@ def setUp(self):
def test_getAverageComponentNumberDensities(self):
"""Test component number density averaging."""
- # becaue of the way densities are set up, the middle block (index 1 of 0-2) component densities are equivalent to the average
+        # because of the way densities are set up, the middle block (index 1 of 0-2) component
+ # densities are equivalent to the average
b = self.bc[1]
for compIndex, c in enumerate(b.getComponents()):
avgDensities = self.bc._getAverageComponentNumberDensities(compIndex)
@@ -774,15 +775,14 @@ def test_getRepresentativeBlocks(self):
_o, r = test_reactors.loadTestReactor(TEST_ROOT)
self.csm.r = r
- # Assumption: All sodium in fuel blocks for this test is 450 C and this is the
- # expected sodium temperature.
- # These lines of code take the first sodium block and decrease the temperature of the block,
- # but change the atom density to approximately zero.
- # Checking later on the nuclide temperature of sodium is asserted to be still 450.
- # This perturbation proves that altering the temperature of an component with near zero atom density
- # does not affect the average temperature of the block collection.
- # This demonstrates that the temperatures of a block collection are atom weighted rather than just the
- # average temperature.
+ # Assumption: All sodium in fuel blocks for this test is 450 C and this is the expected
+ # sodium temperature. These lines of code take the first sodium block and decrease the
+ # temperature of the block, but change the atom density to approximately zero. Checking
+ # later on the nuclide temperature of sodium is asserted to be still 450. This perturbation
+        # proves that altering the temperature of a component with near zero atom density does not
+ # affect the average temperature of the block collection. This demonstrates that the
+ # temperatures of a block collection are atom weighted rather than just the average
+ # temperature.
regularFuel = r.core.getFirstBlock(Flags.FUEL, exact=True)
intercoolant = regularFuel.getComponent(Flags.INTERCOOLANT)
intercoolant.setTemperature(100) # just above melting
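
The comment block above argues that a component with near-zero atom density cannot move the atom-weighted average temperature, no matter how its temperature is perturbed. A quick numerical check of that argument (densities and temperatures are made up):

densities = [4.0e-2, 1.0e-12]   # atoms/(b*cm): normal sodium vs. the near-zero component
temperatures = [450.0, 100.0]   # C: the near-zero component was cooled well below 450
weightedTemp = sum(d * t for d, t in zip(densities, temperatures)) / sum(densities)
assert abs(weightedTemp - 450.0) < 1e-6  # the perturbed component is invisible
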
@@ -819,12 +819,13 @@ def test_getRepresentativeBlocks(self):
def test_createRepresentativeBlocksUsingExistingBlocks(self):
"""
- Demonstrates that a new representative block can be generated from an existing representative block.
+ Demonstrates that a new representative block can be generated from an existing
+ representative block.
Notes
-----
- This tests that the XS ID of the new representative block is correct and that the compositions are identical
- between the original and the new representative blocks.
+ This tests that the XS ID of the new representative block is correct and that the
+ compositions are identical between the original and the new representative blocks.
"""
_o, r = test_reactors.loadTestReactor(TEST_ROOT)
self.csm.createRepresentativeBlocks()
@@ -879,7 +880,8 @@ def test_interactBOC(self):
def test_interactEveryNode(self):
"""Test `everyNode` lattice physics update frequency.
- .. test:: The cross-section group manager frequency depends on the LPI frequency at every time node.
+ .. test:: The cross-section group manager frequency depends on the LPI frequency at every
+ time node.
:id: T_ARMI_XSGM_FREQ2
:tests: R_ARMI_XSGM_FREQ
"""
@@ -895,7 +897,8 @@ def test_interactEveryNode(self):
def test_interactFirstCoupledIteration(self):
"""Test `firstCoupledIteration` lattice physics update frequency.
- .. test:: The cross-section group manager frequency depends on the LPI frequency during first coupled iteration.
+ .. test:: The cross-section group manager frequency depends on the LPI frequency during
+ first coupled iteration.
:id: T_ARMI_XSGM_FREQ3
:tests: R_ARMI_XSGM_FREQ
"""
@@ -937,8 +940,8 @@ def test_xsgmIsRunBeforeXS(self):
def test_copyPregeneratedFiles(self):
"""
- Tests copying pre-generated cross section and flux files
- using reactor that is built from a case settings file.
+        Tests copying pre-generated cross section and flux files using a reactor that is built
+        from a case settings file.
"""
o, r = test_reactors.loadTestReactor(TEST_ROOT)
# Need to overwrite the relative paths with absolute
@@ -973,6 +976,5 @@ def test_conversion_2digit(self):
def makeBlocks(howMany=20):
_o, r = test_reactors.loadTestReactor(TEST_ROOT)
- return r.core.getBlocks(Flags.FUEL)[
- 3 : howMany + 3
- ] # shift y 3 to skip central assemblies 1/3 volume
+    # shift by 3 to skip central assemblies 1/3 volume
+ return r.core.getBlocks(Flags.FUEL)[3 : howMany + 3]
diff --git a/armi/physics/thermalHydraulics/parameters.py b/armi/physics/thermalHydraulics/parameters.py
index 88e6384ec4..a6af2c1bac 100644
--- a/armi/physics/thermalHydraulics/parameters.py
+++ b/armi/physics/thermalHydraulics/parameters.py
@@ -178,14 +178,16 @@ def _getBlockParams():
pb.defParam(
"THaverageCladTemp",
units=units.DEGC,
- description="The nominal average clad temperature in the block, which should be used for neutronic and TH feedback.",
+ description="The nominal average clad temperature in the block, which should be used "
+ "for neutronic and TH feedback.",
location=ParamLocation.AVERAGE,
)
pb.defParam(
"THaverageGapTemp",
units=units.DEGC,
- description="The nominal average gap temperature in the block, which should be used for neutronic and TH feedback.",
+ description="The nominal average gap temperature in the block, which should be used "
+ "for neutronic and TH feedback.",
location=ParamLocation.AVERAGE,
saveToDB=True,
)
@@ -193,7 +195,8 @@ def _getBlockParams():
pb.defParam(
"THaverageDuctTemp",
units=units.DEGC,
- description="The nominal average duct temperature in the block, which should be used for neutronic and TH feedback.",
+ description="The nominal average duct temperature in the block, which should be used "
+ "for neutronic and TH feedback.",
location=ParamLocation.AVERAGE,
)
diff --git a/armi/reactor/assemblyParameters.py b/armi/reactor/assemblyParameters.py
index 5e5ff700bf..8e94734e80 100644
--- a/armi/reactor/assemblyParameters.py
+++ b/armi/reactor/assemblyParameters.py
@@ -82,35 +82,34 @@ def getAssemblyParameterDefinitions():
pb.defParam(
"chargeFis",
units=units.KG,
- description="Fissile mass in assembly when it most recently entered the core."
- " If the assembly was discharged and then re-charged, this value will only"
- " reflect the most recent charge.",
+ description="Fissile mass in assembly when it most recently entered the core. If the "
+ "assembly was discharged and then re-charged, this value will only reflect the most "
+ "recent charge.",
)
pb.defParam(
"chargeTime",
units=units.YEARS,
- description="Time at which this assembly most recently entered the core."
- " If the assembly was discharged and then re-charged, this value will only"
- " reflect the most recent charge.",
+ description="Time at which this assembly most recently entered the core. If the "
+ "assembly was discharged and then re-charged, this value will only reflect the most "
+ "recent charge.",
default=parameters.NoDefault,
)
pb.defParam(
"multiplicity",
units=units.UNITLESS,
- description="The number of physical assemblies that the associated object "
- "represents. This is typically 1, but may need to change when the assembly "
- "is moved between containers with different types of symmetry. For "
- "instance, if an assembly moves from a Core with 1/3rd symmetry into a "
- "spent-fuel pool with full symmetry, rather than splitting the assembly "
- "into 3, the multiplicity can be set to 3. For now, this is a bit of a "
- "hack to make fuel handling work; multiplicity in the 1/3 core should "
- "be 3 to begin with, in which case this parameter could be used as the "
- "primary means of handling symmetry and fractional domains throughout "
- "ARMI. We will probably roll that out once the dust settles on some of "
- "this SFP work. For now, the Core stores multiplicity as 1 always, since "
- "the powerMultiplier to adjust to full-core quantities.",
+ description="The number of physical assemblies that the associated object represents. "
+ "This is typically 1, but may need to change when the assembly is moved between "
+ "containers with different types of symmetry. For instance, if an assembly moves from "
+ "a Core with 1/3rd symmetry into a spent-fuel pool with full symmetry, rather than "
+ "splitting the assembly into 3, the multiplicity can be set to 3. For now, this is a "
+ "bit of a hack to make fuel handling work; multiplicity in the 1/3 core should be 3 to "
+ "begin with, in which case this parameter could be used as the primary means of "
+ "handling symmetry and fractional domains throughout ARMI. We will probably roll that "
+ "out once the dust settles on some of this SFP work. For now, the Core stores "
+            "multiplicity as 1 always, since the powerMultiplier is used to adjust to full-core "
+            "quantities.",
default=1,
)
@@ -168,10 +167,9 @@ def _enforceNotesRestrictions(self, value):
pb.defParam(
"notes",
units=units.UNITLESS,
- description="A string with notes about the assembly, limited to 1000 characters."
- " This parameter is not meant to store data. Needlessly storing large strings"
- " on this parameter for every assembly is potentially unwise from a memory"
- " perspective.",
+ description="A string with notes about the assembly, limited to 1000 characters. This "
+ "parameter is not meant to store data. Needlessly storing large strings on this "
+ "parameter for every assembly is potentially unwise from a memory perspective.",
saveToDB=True,
default="",
setter=_enforceNotesRestrictions,
@@ -185,8 +183,9 @@ def _enforceNotesRestrictions(self, value):
"crCriticalFraction",
units=units.UNITLESS,
description=(
- "The insertion fraction when the control rod assembly is in its critical configuration. "
- "Note that the default of -1.0 is a trigger for this value not being set yet."
+ "The insertion fraction when the control rod assembly is in its critical "
+ "configuration. Note that the default of -1.0 is a trigger for this value not "
+ "being set yet."
),
saveToDB=True,
default=-1.0,
@@ -204,9 +203,9 @@ def _enforceNotesRestrictions(self, value):
"crInsertedElevation",
units=units.CM,
description=(
- "The elevation of the furthest-most insertion point of a control rod assembly. For a control rod assembly "
- "inserted from the top, this will be the lower tip of the bottom-most moveable section in the assembly when "
- "fully inserted."
+ "The elevation of the furthest-most insertion point of a control rod assembly. For "
+ "a control rod assembly inserted from the top, this will be the lower tip of the "
+ "bottom-most moveable section in the assembly when fully inserted."
),
categories=[parameters.Category.assignInBlueprints],
saveToDB=True,
@@ -223,9 +222,9 @@ def _enforceNotesRestrictions(self, value):
"crWithdrawnElevation",
units=units.CM,
description=(
- "The elevation of the tip of a control rod assembly when it is fully withdrawn. For a control rod assembly "
- "inserted from the top, this will be the lower tip of the bottom-most moveable section in the assembly when "
- "fully withdrawn."
+ "The elevation of the tip of a control rod assembly when it is fully withdrawn. "
+ "For a control rod assembly inserted from the top, this will be the lower tip of "
+ "the bottom-most moveable section in the assembly when fully withdrawn."
),
categories=[parameters.Category.assignInBlueprints],
saveToDB=True,
diff --git a/armi/reactor/blockParameters.py b/armi/reactor/blockParameters.py
index fe1eb895cc..3cda7532bf 100644
--- a/armi/reactor/blockParameters.py
+++ b/armi/reactor/blockParameters.py
@@ -33,8 +33,8 @@ def getBlockParameterDefinitions():
"orientation",
units=units.DEGREES,
description=(
- "Triple representing rotations counterclockwise around each spatial axis. For example, "
- "a hex assembly rotated by 1/6th has orientation (0,0,60.0)"
+ "Triple representing rotations counterclockwise around each spatial axis. For "
+ "example, a hex assembly rotated by 1/6th has orientation (0,0,60.0)"
),
default=None,
)
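
The orientation example in the description above is just 360 degrees divided by six (a quick check, not ARMI code):

degreesPerSixthTurn = 360.0 / 6.0  # hex assembly rotated by 1/6th of a turn
orientation = (0.0, 0.0, degreesPerSixthTurn)
assert orientation == (0.0, 0.0, 60.0)
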
@@ -161,7 +161,10 @@ def getBlockParameterDefinitions():
pb.defParam(
"residence",
units=units.DAYS,
- description="Duration that a block has been in the core multiplied by the fraction of full power generated in that time.",
+ description=(
+ "Duration that a block has been in the core multiplied by the fraction "
+ "of full power generated in that time."
+ ),
categories=["cumulative"],
)
@@ -345,7 +348,10 @@ def buGroupNum(self, buGroupNum):
pb.defParam(
"axialExpTargetComponent",
units=units.UNITLESS,
- description="The name of the target component used for axial expansion and contraction of solid components.",
+ description=(
+ "The name of the target component used for axial expansion and "
+ "contraction of solid components."
+ ),
default="",
saveToDB=True,
)
@@ -625,12 +631,11 @@ def xsTypeNum(self, value):
pb.defParam(
"assemNum",
units=units.UNITLESS,
- description="Index that refers, nominally, to the assemNum parameter of "
- "the containing Assembly object. This is stored on the Block to aid in "
- "visualizing shuffle patterns and the like, and should not be used within "
- "the code. These are not guaranteed to be consistent with the containing "
- "Assembly, so they should not be used as a reliable means to reconstruct "
- "the model.",
+ description="Index that refers, nominally, to the assemNum parameter of the containing "
+ "Assembly object. This is stored on the Block to aid in visualizing shuffle patterns "
+ "and the like, and should not be used within the code. These are not guaranteed to be "
+ "consistent with the containing Assembly, so they should not be used as a reliable "
+ "means to reconstruct the model.",
categories=[parameters.Category.retainOnReplacement],
)
@@ -799,7 +804,10 @@ def xsTypeNum(self, value):
pb.defParam(
"smearDensity",
units=units.UNITLESS,
- description="Smear density of fuel pins in this block. Defined as the ratio of fuel area to total space inside cladding.",
+ description=(
+ "Smear density of fuel pins in this block. Defined as the ratio of fuel "
+ "area to total space inside cladding."
+ ),
location=ParamLocation.AVERAGE,
)
diff --git a/armi/reactor/blocks.py b/armi/reactor/blocks.py
index 97296c974a..99e0c1cf58 100644
--- a/armi/reactor/blocks.py
+++ b/armi/reactor/blocks.py
@@ -13,12 +13,10 @@
# limitations under the License.
"""
-Defines blocks, which are axial chunks of assemblies. They contain
-most of the state variables, including power, flux, and homogenized number densities.
+Defines blocks, which are axial chunks of assemblies. They contain most of the state variables,
+including power, flux, and homogenized number densities.
-Assemblies are made of blocks.
-
-Blocks are made of components.
+Assemblies are made of blocks. Blocks are made of components.
"""
from typing import Optional, Type, Tuple, ClassVar
import collections
@@ -87,8 +85,7 @@ def __init__(self, name: str, height: float = 1.0):
The name of this block
height : float, optional
- The height of the block in cm. Defaults to 1.0 so that
- `getVolume` assumes unit height.
+ The height of the block in cm. Defaults to 1.0 so that `getVolume` assumes unit height.
"""
composites.Composite.__init__(self, name)
self.p.height = height
@@ -163,12 +160,11 @@ def createHomogenizedCopy(self, pinSpatialLocators=False):
Notes
-----
- Used to implement a copy function for specific block types that can
- be much faster than a deepcopy by glossing over details that may be
- unnecessary in certain contexts.
+ Used to implement a copy function for specific block types that can be much faster than a
+ deepcopy by glossing over details that may be unnecessary in certain contexts.
- This base class implementation is just a deepcopy of the block, in full detail
- (not homogenized).
+ This base class implementation is just a deepcopy of the block, in full detail (not
+ homogenized).
"""
return copy.deepcopy(self)
@@ -221,10 +217,9 @@ def makeName(self, assemNum, axialIndex):
This also sets the block-level assembly-num param.
- Once, we used a axial-character suffix to represent the axial
- index, but this is inherently limited so we switched to a numerical
- name. The axial suffix needs can be brought in in plugins that require
- them.
+        Once, we used an axial-character suffix to represent the axial index, but this is
+        inherently limited, so we switched to a numerical name. Plugins that require an axial
+        suffix can bring it in themselves.
Examples
--------
@@ -238,10 +233,10 @@ def getSmearDensity(self, cold=True):
"""
Compute the smear density of pins in this block.
- Smear density is the area of the fuel divided by the area of the space available
- for fuel inside the cladding. Other space filled with solid materials is not
- considered available. If all the area is fuel, it has 100% smear density. Lower
- smear density allows more room for swelling.
+ Smear density is the area of the fuel divided by the area of the space available for fuel
+ inside the cladding. Other space filled with solid materials is not considered available. If
+ all the area is fuel, it has 100% smear density. Lower smear density allows more room for
+ swelling.
.. warning:: This requires circular fuel and circular cladding. Designs that vary
from this will be wrong. It may make sense in the future to put this somewhere a
@@ -249,13 +244,12 @@ def getSmearDensity(self, cold=True):
Notes
-----
- This only considers circular objects. If you have a cladding that is not a circle,
- it will be ignored.
+ This only considers circular objects. If you have a cladding that is not a circle, it will
+ be ignored.
- Negative areas can exist for void gaps in the fuel pin. A negative area in a gap
- represents overlap area between two solid components. To account for this
- additional space within the pin cladding the abs(negativeArea) is added to the
- inner cladding area.
+ Negative areas can exist for void gaps in the fuel pin. A negative area in a gap represents
+ overlap area between two solid components. To account for this additional space within the
+        pin cladding, the abs(negativeArea) is added to the inner cladding area.
Parameters
----------
@@ -299,7 +293,8 @@ def getSmearDensity(self, cold=True):
if c.isFuel():
fuelComponentArea += componentArea
elif c.hasFlags(Flags.SLUG):
- # this flag designates that this clad/slug combination isn't fuel and shouldn't be counted in the average
+ # this flag designates that this clad/slug combination isn't fuel and shouldn't be
+ # counted in the average
pass
else:
if c.containsSolidMaterial():
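A minimal arithmetic sketch of that definition, using hypothetical pin dimensions (the numbers are illustrative only and do not come from any ARMI input):

>>> import math
>>> cladInnerRadius = 0.40  # cm, hypothetical
>>> fuelOuterRadius = 0.36  # cm, hypothetical
>>> fuelArea = math.pi * fuelOuterRadius ** 2
>>> availableArea = math.pi * cladInnerRadius ** 2  # space available inside the cladding
>>> round(fuelArea / availableArea, 3)  # smear density as a fraction (0.81 -> 81%)
0.81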
@@ -337,9 +332,8 @@ def autoCreateSpatialGrids(self):
Raises
------
ValueError
- If the multiplicities of the block are not only 1 or N or if generated ringNumber leads to more positions than necessary.
-
-
+ If the multiplicities of the block are not only 1 or N or if generated ringNumber leads
+ to more positions than necessary.
"""
raise NotImplementedError()
@@ -363,7 +357,7 @@ def getMgFlux(self, adjoint=False, average=False, volume=None, gamma=False):
volume: float, optional
If average=True, the volume-integrated flux is divided by volume before being returned.
- The user may specify a volume here, or the function will obtain the block volume directly.
+ The user may specify a volume, or the function will obtain the block volume directly.
gamma : bool, optional
Whether to return the neutron flux or the gamma flux.
@@ -391,8 +385,8 @@ def setPinMgFluxes(self, fluxes, adjoint=False, gamma=False):
Parameters
----------
fluxes : 2-D list of floats
- The block-level pin multigroup fluxes. fluxes[g][i] represents the flux in group g for pin i.
- Flux units are the standard n/cm^2/s.
+ The block-level pin multigroup fluxes. fluxes[g][i] represents the flux in group g for
+ pin i. Flux units are the standard n/cm^2/s.
The "ARMI pin ordering" is used, which is counter-clockwise from 3 o'clock.
adjoint : bool, optional
Whether to set real or adjoint data.
@@ -402,8 +396,8 @@ def setPinMgFluxes(self, fluxes, adjoint=False, gamma=False):
Outputs
-------
self.p.pinMgFluxes : 2-D array of floats
- The block-level pin multigroup fluxes. pinMgFluxes[g][i] represents the flux in group g for pin i.
- Flux units are the standard n/cm^2/s.
+ The block-level pin multigroup fluxes. pinMgFluxes[g][i] represents the flux in group g
+ for pin i. Flux units are the standard n/cm^2/s.
The "ARMI pin ordering" is used, which is counter-clockwise from 3 o'clock.
"""
pinFluxes = []
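A minimal sketch of the indexing convention described above (group-major, pin-minor, counter-clockwise from 3 o'clock). The two-group, three-pin values are hypothetical, and ``b`` is assumed to be an existing block:

>>> fluxes = [
...     [1.0e13, 1.1e13, 0.9e13],  # group 0: one value per pin, n/cm^2/s
...     [4.0e12, 4.2e12, 3.8e12],  # group 1
... ]
>>> b.setPinMgFluxes(fluxes)  # afterwards b.p.pinMgFluxes[g][i] holds these values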
@@ -1612,18 +1606,19 @@ def setAxialExpTargetComp(self, targetComponent):
:id: I_ARMI_MANUAL_TARG_COMP
:implements: R_ARMI_MANUAL_TARG_COMP
- Sets the ``axialExpTargetComponent`` parameter on the block to the name
- of the Component which is passed in. This is then used by the
+ Sets the ``axialExpTargetComponent`` parameter on the block to the name of the Component
+ which is passed in. This is then used by the
:py:class:`~armi.reactor.converters.axialExpansionChanger.AxialExpansionChanger`
class during axial expansion.
- This method is typically called from within :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct`
- during the process of building a Block from the blueprints.
+ This method is typically called from within
+ :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct` during the
+ process of building a Block from the blueprints.
Parameter
---------
targetComponent: :py:class:`Component ` object
- component specified to be target component for axial expansion changer
+ Component specified to be target component for axial expansion changer
See Also
--------
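In sketch form, block construction code might mark one of the block's components as the expansion target like this; ``b`` and ``fuelComponent`` are assumed to already exist (hypothetical names), and the call simply records the component's name in the block parameter described above:

>>> b.setAxialExpTargetComp(fuelComponent)
>>> b.p.axialExpTargetComponent == fuelComponent.name
True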
@@ -1666,10 +1661,9 @@ class HexBlock(Block):
:id: I_ARMI_BLOCK_HEX
:implements: R_ARMI_BLOCK_HEX
- This class defines hexagonal-shaped Blocks. It inherits functionality from the parent
- class, Block, and defines hexagonal-specific methods including, but not limited to,
- querying pin pitch, pin linear power densities, hydraulic diameter, and retrieving
- inner and outer pitch.
+ This class defines hexagonal-shaped Blocks. It inherits functionality from the parent class,
+ Block, and defines hexagonal-specific methods including, but not limited to, querying pin
+ pitch, pin linear power densities, hydraulic diameter, and retrieving inner and outer pitch.
"""
PITCH_COMPONENT_TYPE: ClassVar[_PitchDefiningComponent] = (components.Hexagon,)
@@ -2257,25 +2251,25 @@ def getSymmetryFactor(self):
def autoCreateSpatialGrids(self):
"""
- Given a block without a spatialGrid, create a spatialGrid and give its children
- the corresponding spatialLocators (if it is a simple block).
+ Given a block without a spatialGrid, create a spatialGrid and give its children the
+ corresponding spatialLocators (if it is a simple block).
- In this case, a simple block would be one that has either multiplicity of
- components equal to 1 or N but no other multiplicities. Also, this should only
- happen when N fits exactly into a given number of hex rings. Otherwise, do not
- create a grid for this block.
+ In this case, a simple block would be one that has either multiplicity of components equal
+ to 1 or N but no other multiplicities. Also, this should only happen when N fits exactly
+ into a given number of hex rings. Otherwise, do not create a grid for this block.
Notes
-----
- If the block meets all the conditions, we gather all components to either be a multiIndexLocation containing all
- of the pin positions, otherwise, locator is the center (0,0).
+        If the Block meets all the conditions, each component is assigned either a
+        multiIndexLocation containing all of the pin positions, or a locator at the center (0,0).
Also, this only works on blocks that have 'flat side up'.
Raises
------
ValueError
- If the multiplicities of the block are not only 1 or N or if generated ringNumber leads to more positions than necessary.
+ If the multiplicities of the block are not only 1 or N or if generated ringNumber leads
+ to more positions than necessary.
"""
# Check multiplicities...
mults = {c.getDimension("mult") for c in self.iterComponents()}
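The "N fits exactly into a given number of hex rings" condition amounts to N being a centered hexagonal number (1, 7, 19, 37, ...). A small stand-alone sketch of that arithmetic, independent of the actual grid-building code:

>>> def fitsWholeHexRings(n):
...     """True if n positions exactly fill some number of complete hex rings."""
...     total, ring = 1, 1
...     while total < n:
...         ring += 1
...         total += 6 * (ring - 1)  # ring r adds 6*(r-1) positions
...     return total == n
>>> [fitsWholeHexRings(n) for n in (1, 7, 18, 19)]
[True, True, False, True]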
@@ -2337,8 +2331,8 @@ def getPinPitch(self, cold=False):
"""
Get the pin pitch in cm.
- Assumes that the pin pitch is defined entirely by contacting cladding tubes
- and wire wraps. Grid spacers not yet supported.
+ Assumes that the pin pitch is defined entirely by contacting cladding tubes and wire wraps.
+ Grid spacers not yet supported.
.. impl:: Pin pitch within block is retrievable.
:id: I_ARMI_BLOCK_DIMS6
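For the contacting clad-plus-wire geometry assumed above, the pitch reduces to the clad outer diameter plus the wire-wrap diameter. A purely illustrative arithmetic sketch with hypothetical dimensions (not a call into the real method):

>>> cladOuterDiam = 0.80  # cm, hypothetical
>>> wireWrapDiam = 0.10   # cm, hypothetical
>>> round(cladOuterDiam + wireWrapDiam, 3)  # cm, center-to-center pin pitch
0.9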
diff --git a/armi/reactor/blueprints/assemblyBlueprint.py b/armi/reactor/blueprints/assemblyBlueprint.py
index 8e13d1fcba..b388abe44a 100644
--- a/armi/reactor/blueprints/assemblyBlueprint.py
+++ b/armi/reactor/blueprints/assemblyBlueprint.py
@@ -15,12 +15,10 @@
"""
This module defines the blueprints input object for assemblies.
-In addition to defining the input format, the ``AssemblyBlueprint`` class is responsible
-for constructing ``Assembly`` objects. An attempt has been made to decouple ``Assembly``
-construction from the rest of ARMI as much as possible. For example, an assembly does
-not require a reactor to be constructed, or a geometry file (but uses contained Block
-geometry type as a surrogate).
-
+In addition to defining the input format, the ``AssemblyBlueprint`` class is responsible for
+constructing ``Assembly`` objects. An attempt has been made to decouple ``Assembly`` construction
+from the rest of ARMI as much as possible. For example, an assembly does not require a reactor to be
+constructed, or a geometry file (but uses contained Block geometry type as a surrogate).
"""
import yamlize
@@ -46,8 +44,8 @@ def _configureAssemblyTypes():
class Modifications(yamlize.Map):
"""
- The names of material modifications and lists of the modification values for
- each block in the assembly.
+ The names of material modifications and lists of the modification values for each block in the
+ assembly.
"""
key_type = yamlize.Typed(str)
@@ -55,10 +53,7 @@ class Modifications(yamlize.Map):
class ByComponentModifications(yamlize.Map):
- """
- The name of a component within the block and an associated Modifications
- object.
- """
+ """The name of a component within the block and an associated Modifications object."""
key_type = yamlize.Typed(str)
value_type = Modifications
@@ -68,35 +63,36 @@ class MaterialModifications(yamlize.Map):
"""
A yamlize map for reading and holding material modifications.
- A user may specify material modifications directly
- as keys/values on this class, in which case these material modifications will
- be blanket applied to the entire block.
+ A user may specify material modifications directly as keys/values on this class, in which case
+ these material modifications will be blanket applied to the entire block.
- If the user wishes to specify material modifications specific to a component
- within the block, they should use the `by component` attribute, specifying
- the keys/values underneath the name of a specific component in the block.
+ If the user wishes to specify material modifications specific to a component within the block,
+ they should use the `by component` attribute, specifying the keys/values underneath the name of
+ a specific component in the block.
.. impl:: User-impact on material definitions.
:id: I_ARMI_MAT_USER_INPUT0
:implements: R_ARMI_MAT_USER_INPUT
- Defines a yaml map attribute for the assembly portion of the blueprints
- (see :py:class:`~armi.blueprints.assemblyBlueprint.AssemblyBlueprint`) that
- allows users to specify material attributes as lists corresponding to
- each axial block in the assembly. Two types of specifications can be made:
-
- 1. Key-value pairs can be specified directly, where the key is the
- name of the modification and the value is the list of block values.
-
- 2. The "by component" attribute can be used, in which case the user
- can specify material attributes that are specific to individual components
- in each block. This is enabled through the :py:class:`~armi.reactor.blueprints.assemblyBlueprint.ByComponentModifications`
- class, which basically just allows for one additional layer of attributes
- corresponding to the component names.
-
- These material attributes can be used during the resolution of material
- classes during core instantiation (see :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct`
- and :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`).
+ Defines a yaml map attribute for the assembly portion of the blueprints (see
+ :py:class:`~armi.blueprints.assemblyBlueprint.AssemblyBlueprint`) that allows users to
+ specify material attributes as lists corresponding to each axial block in the assembly. Two
+ types of specifications can be made:
+
+ 1. Key-value pairs can be specified directly, where the key is the name of the
+ modification and the value is the list of block values.
+
+ 2. The "by component" attribute can be used, in which case the user can specify material
+ attributes that are specific to individual components in each block. This is enabled
+ through the
+ :py:class:`~armi.reactor.blueprints.assemblyBlueprint.ByComponentModifications` class,
+ which basically just allows for one additional layer of attributes corresponding to the
+ component names.
+
+ These material attributes can be used during the resolution of material classes during core
+ instantiation (see
+ :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct` and
+ :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`).
"""
key_type = yamlize.Typed(str)
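To make the two specification styles concrete, the fragment below sketches what such input might look like for a three-block assembly, assuming the map sits under a ``material modifications:`` entry of the assembly definition. It is held in a Python string purely for illustration, and the modification names (``U235_wt_frac``, ``ZR_wt_frac``) and component name (``fuel``) are assumptions rather than a statement of what any particular material accepts:

>>> materialModsYaml = """
... material modifications:
...     U235_wt_frac: [0.11, 0.11, 0.13]        # blanket-applied to every component in each block
...     by component:
...         fuel:
...             ZR_wt_frac: [0.06, 0.06, 0.07]  # applies only to the component named `fuel`
... """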
diff --git a/armi/reactor/blueprints/blockBlueprint.py b/armi/reactor/blueprints/blockBlueprint.py
index 7c11ded564..f233fd58a9 100644
--- a/armi/reactor/blueprints/blockBlueprint.py
+++ b/armi/reactor/blueprints/blockBlueprint.py
@@ -48,25 +48,24 @@ class BlockBlueprint(yamlize.KeyedList):
:id: I_ARMI_BP_BLOCK
:implements: R_ARMI_BP_BLOCK
- Defines a yaml construct that allows the user to specify attributes of a
- block from within their blueprints file, including a name, flags, a radial
- grid to specify locations of pins, and the name of a component which
- drives the axial expansion of the block (see :py:mod:`~armi.reactor.converters.axialExpansionChanger`).
+ Defines a yaml construct that allows the user to specify attributes of a block from within
+ their blueprints file, including a name, flags, a radial grid to specify locations of pins,
+ and the name of a component which drives the axial expansion of the block (see
+ :py:mod:`~armi.reactor.converters.axialExpansionChanger`).
- In addition, the user may specify key-value pairs to specify the components
- contained within the block, where the keys are component names and the
- values are component blueprints (see :py:class:`~armi.reactor.blueprints.ComponentBlueprint.ComponentBlueprint`).
+ In addition, the user may specify key-value pairs to specify the components contained within
+ the block, where the keys are component names and the values are component blueprints (see
+ :py:class:`~armi.reactor.blueprints.ComponentBlueprint.ComponentBlueprint`).
- Relies on the underlying infrastructure from the ``yamlize`` package for
- reading from text files, serialization, and internal storage of the data.
+ Relies on the underlying infrastructure from the ``yamlize`` package for reading from text
+ files, serialization, and internal storage of the data.
- Is implemented into a blueprints file by being imported and used
- as an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints`
- class.
+ Is implemented into a blueprints file by being imported and used as an attribute within the
+ larger :py:class:`~armi.reactor.blueprints.Blueprints` class.
Includes a ``construct`` method, which instantiates an instance of
- :py:class:`~armi.reactor.blocks.Block` with the characteristics
- as specified in the blueprints.
+ :py:class:`~armi.reactor.blocks.Block` with the characteristics as specified in the
+ blueprints.
"""
item_type = componentBlueprint.ComponentBlueprint
diff --git a/armi/reactor/blueprints/componentBlueprint.py b/armi/reactor/blueprints/componentBlueprint.py
index e1404cd2be..66caae6e3f 100644
--- a/armi/reactor/blueprints/componentBlueprint.py
+++ b/armi/reactor/blueprints/componentBlueprint.py
@@ -13,7 +13,8 @@
# limitations under the License.
"""
-This module defines the ARMI input for a component definition, and code for constructing an ARMI ``Component``.
+This module defines the ARMI input for a component definition, and code for constructing an ARMI
+``Component``.
Special logic is required for handling component links.
"""
@@ -69,11 +70,11 @@ def from_yaml(cls, loader, node, _rtd=None):
@classmethod
def to_yaml(cls, dumper, self, _rtd=None):
"""
- Override the ``Yamlizable.to_yaml`` to remove the object-like behavior, otherwise we'd end up with a
- ``{value: ...}`` dictionary.
+        Override the ``Yamlizable.to_yaml`` to remove the object-like behavior; otherwise we'd end
+ up with a ``{value: ...}`` dictionary.
- This allows someone to programmatically edit the component dimensions without using the ``ComponentDimension``
- class.
+ This allows someone to programmatically edit the component dimensions without using the
+ ``ComponentDimension`` class.
"""
if not isinstance(self, cls):
self = cls(self)
@@ -117,36 +118,36 @@ def __hash__(self):
class ComponentBlueprint(yamlize.Object):
"""
- This class defines the inputs necessary to build ARMI component objects. It uses ``yamlize`` to enable serialization
- to and from YAML.
+ This class defines the inputs necessary to build ARMI component objects. It uses ``yamlize`` to
+ enable serialization to and from YAML.
.. impl:: Construct component from blueprint file.
:id: I_ARMI_BP_COMP
:implements: R_ARMI_BP_COMP
- Defines a yaml construct that allows the user to specify attributes of a
- component from within their blueprints file, including a name, flags, shape,
- material and/or isotopic vector, input temperature, corresponding component dimensions,
- and ID for placement in a block lattice (see :py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint`).
- Component dimensions that can be defined for a given component are dependent
- on the component's ``shape`` attribute, and the dimensions defining each
- shape can be found in the :py:mod:`~armi.reactor.components` module.
+ Defines a yaml construct that allows the user to specify attributes of a component from
+ within their blueprints file, including a name, flags, shape, material and/or isotopic
+ vector, input temperature, corresponding component dimensions, and ID for placement in a
+ block lattice (see :py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint`).
+ Component dimensions that can be defined for a given component are dependent on the
+ component's ``shape`` attribute, and the dimensions defining each shape can be found in the
+ :py:mod:`~armi.reactor.components` module.
- Limited validation on the inputs is performed to ensure that the component
- shape corresponds to a valid shape defined by the ARMI application.
+ Limited validation on the inputs is performed to ensure that the component shape corresponds
+ to a valid shape defined by the ARMI application.
- Relies on the underlying infrastructure from the ``yamlize`` package for
- reading from text files, serialization, and internal storage of the data.
+ Relies on the underlying infrastructure from the ``yamlize`` package for reading from text
+ files, serialization, and internal storage of the data.
- Is implemented as part of a blueprints file by being imported and used
- as an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints`
- class. Can also be used within the :py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint`
- class to enable specification of components directly within the "blocks"
- portion of the blueprint file.
+ Is implemented as part of a blueprints file by being imported and used as an attribute
+ within the larger :py:class:`~armi.reactor.blueprints.Blueprints` class. Can also be used
+ within the :py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint` class to
+ enable specification of components directly within the "blocks" portion of the blueprint
+ file.
Includes a ``construct`` method, which instantiates an instance of
- :py:class:`~armi.reactor.components.component.Component` with the characteristics
- specified in the blueprints (see :need:`I_ARMI_MAT_USER_INPUT1`).
+ :py:class:`~armi.reactor.components.component.Component` with the characteristics specified
+ in the blueprints (see :need:`I_ARMI_MAT_USER_INPUT1`).
"""
name = yamlize.Attribute(type=str)
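As a sketch of what a single component entry can look like within a block definition, here is a hypothetical circular fuel component, again wrapped in a Python string only for illustration; the attribute names and dimensions shown are assumptions based on the description above, not a definitive schema:

>>> componentYaml = """
... fuel:
...     shape: Circle
...     material: UZr
...     Tinput: 25.0   # input temperature, degC
...     Thot: 600.0    # hot temperature, degC
...     id: 0.0        # inner diameter, cm
...     od: 0.72       # outer diameter, cm
...     mult: 169      # number of pins
... """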
@@ -340,21 +341,21 @@ def insertDepletableNuclideKeys(c, blueprint):
This is called during the component construction process for each component from within
:py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`.
- For a given initialized component, check its flags to determine if it
- has been marked as depletable. If it is, use :py:func:`~armi.nucDirectory.nuclideBases.initReachableActiveNuclidesThroughBurnChain`
- to apply the user-specifications in the "nuclide flags" section of the blueprints
- to the component such that all active isotopes and derivatives of those
- isotopes in the burn chain are initialized to have an entry in the component's
- ``numberDensities`` dictionary.
+ For a given initialized component, check its flags to determine if it has been marked as
+ depletable. If it is, use
+ :py:func:`~armi.nucDirectory.nuclideBases.initReachableActiveNuclidesThroughBurnChain` to
+ apply the user-specifications in the "nuclide flags" section of the blueprints to the
+ Component such that all active isotopes and derivatives of those isotopes in the burn chain
+ are initialized to have an entry in the component's ``numberDensities`` dictionary.
- Note that certain case settings, including ``fpModel`` and ``fpModelLibrary``,
- may trigger modifications to the active nuclides specified by the user
- in the "nuclide flags" section of the blueprints.
+ Note that certain case settings, including ``fpModel`` and ``fpModelLibrary``, may trigger
+ modifications to the active nuclides specified by the user in the "nuclide flags" section of
+ the blueprints.
Notes
-----
- This should be moved to a neutronics/depletion plugin hook but requires some
- refactoring in how active nuclides and reactors are initialized first.
+ This should be moved to a neutronics/depletion plugin hook but requires some refactoring in how
+ active nuclides and reactors are initialized first.
See Also
--------
@@ -375,8 +376,8 @@ class ComponentKeyedList(yamlize.KeyedList):
This is used within the ``components:`` main entry of the blueprints.
- This is *not* (yet) used when components are defined within a block blueprint.
- That is handled in the blockBlueprint construct method.
+ This is *not* (yet) used when components are defined within a block blueprint. That is handled
+ in the blockBlueprint construct method.
"""
item_type = ComponentBlueprint
diff --git a/armi/reactor/blueprints/gridBlueprint.py b/armi/reactor/blueprints/gridBlueprint.py
index 0fe0124542..2d0062ee90 100644
--- a/armi/reactor/blueprints/gridBlueprint.py
+++ b/armi/reactor/blueprints/gridBlueprint.py
@@ -14,20 +14,17 @@
"""
Input definitions for Grids.
-Grids are given names which can be referred to on other input structures
-(like core maps and pin maps).
+Grids are given names which can be referred to on other input structures (like core maps and pin
+maps).
-These are in turn interpreted into concrete things at lower levels. For
-example:
+These are in turn interpreted into concrete things at lower levels. For example:
-* Core map lattices get turned into :py:mod:`armi.reactor.grids`,
- which get set to ``core.spatialGrid``.
-* Block pin map lattices get applied to the components to provide
- some subassembly spatial details
+* Core map lattices get turned into :py:mod:`armi.reactor.grids`, which get set to
+ ``core.spatialGrid``.
+* Block pin map lattices get applied to the components to provide some subassembly spatial details.
-Lattice inputs here are floating in space. Specific dimensions
-and anchor points are handled by the lower-level objects definitions. This
-is intended to maximize lattice reusability.
+Lattice inputs here are floating in space. Specific dimensions and anchor points are handled by the
+lower-level object definitions. This is intended to maximize lattice reusability.
See Also
--------
@@ -102,7 +99,6 @@
IC MC MC OC RR
IC IC MC PC RR SH
-
"""
import copy
from io import StringIO
@@ -138,33 +134,32 @@ class GridBlueprint(yamlize.Object):
"""
A grid input blueprint.
- These directly build Grid objects and contain information about
- how to populate the Grid with child ArmiObjects for the Reactor Model.
+ These directly build Grid objects and contain information about how to populate the Grid with
+ child ArmiObjects for the Reactor Model.
- The grids get origins either from a parent block (for pin lattices)
- or from a System (for Cores, SFPs, and other components).
+ The grids get origins either from a parent block (for pin lattices) or from a System (for Cores,
+ SFPs, and other components).
.. impl:: Define a lattice map in reactor core.
:id: I_ARMI_BP_GRID
:implements: R_ARMI_BP_GRID
- Defines a yaml construct that allows the user to specify a grid
- from within their blueprints file, including a name, geometry, dimensions,
- symmetry, and a map with the relative locations of components within that grid.
+ Defines a yaml construct that allows the user to specify a grid from within their blueprints
+ file, including a name, geometry, dimensions, symmetry, and a map with the relative
+ locations of components within that grid.
- Relies on the underlying infrastructure from the ``yamlize`` package for
- reading from text files, serialization, and internal storage of the data.
+ Relies on the underlying infrastructure from the ``yamlize`` package for reading from text
+ files, serialization, and internal storage of the data.
- Is implemented as part of a blueprints file by being used in key-value pairs
- within the :py:class:`~armi.reactor.blueprints.gridBlueprint.Grid` class,
- which is imported and used as an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints`
- class.
+ Is implemented as part of a blueprints file by being used in key-value pairs within the
+ :py:class:`~armi.reactor.blueprints.gridBlueprint.Grid` class, which is imported and used as
+ an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints` class.
- Includes a ``construct`` method, which instantiates an instance of one
- of the subclasses of :py:class:`~armi.reactor.grids.structuredgrid.StructuredGrid`.
- This is typically called from within :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct`,
- which then also associates the individual components in the block with
- locations specifed in the grid.
+ Includes a ``construct`` method, which instantiates an instance of one of the subclasses of
+ :py:class:`~armi.reactor.grids.structuredgrid.StructuredGrid`. This is typically called from
+ within :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct`, which
+            then also associates the individual components in the block with locations specified in the
+ grid.
Attributes
----------
@@ -175,16 +170,16 @@ class GridBlueprint(yamlize.Object):
latticeMap : str
An asciimap representation of the lattice contents
latticeDimensions : Triplet
- An x/y/z Triplet with grid dimensions in cm. This is used to specify a uniform
- grid, such as Cartesian or Hex. Mutually exclusive with gridBounds.
+ An x/y/z Triplet with grid dimensions in cm. This is used to specify a uniform grid, such as
+ Cartesian or Hex. Mutually exclusive with gridBounds.
gridBounds : dict
- A dictionary containing explicit grid boundaries. Specific keys used will depend
- on the type of grid being defined. Mutually exclusive with latticeDimensions.
+ A dictionary containing explicit grid boundaries. Specific keys used will depend on the type
+ of grid being defined. Mutually exclusive with latticeDimensions.
symmetry : str
A string defining the symmetry mode of the grid
gridContents : dict
- A {(i,j): str} dictionary mapping spatialGrid indices
- in 2-D to string specifiers of what's supposed to be in the grid.
+ A {(i,j): str} dictionary mapping spatialGrid indices in 2-D to string specifiers of what's
+ supposed to be in the grid.
"""
name = yamlize.Attribute(key="name", type=str)
@@ -203,10 +198,9 @@ class GridBlueprint(yamlize.Object):
)
),
)
- # gridContents is the final form of grid contents information;
- # it is set regardless of how the input is read. When writing, we attempt to
- # preserve the input mode and write ascii map if that was what was originally
- # provided.
+ # gridContents is the final form of grid contents information; it is set regardless of how the
+ # input is read. When writing, we attempt to preserve the input mode and write ascii map if that
+ # was what was originally provided.
gridContents = yamlize.Attribute(key="grid contents", type=dict, default=None)
@gridContents.validator
@@ -242,9 +236,9 @@ def __init__(
setattr this is only needed for when you want to make this object from a non-YAML
source.
- .. warning:: This is a Yamlize object, so ``__init__`` never really gets called.
- Only ``__new__`` does.
-
+ Warning
+ -------
+ This is a Yamlize object, so ``__init__`` never really gets called. Only ``__new__`` does.
"""
self.name = name
self.geom = str(geom)
@@ -257,8 +251,8 @@ def __init__(
@property
def readFromLatticeMap(self):
"""
- This is implemented as a property, since as a Yamlize object, __init__ is not
- always called and we have to lazily evaluate its default value.
+ This is implemented as a property, since as a Yamlize object, __init__ is not always called
+ and we have to lazily evaluate its default value.
"""
return getattr(self, "_readFromLatticeMap", False)
@@ -276,9 +270,8 @@ def _constructSpatialGrid(self):
"""
Build spatial grid.
- If you do not enter latticeDimensions, a unit grid will be produced which must
- be adjusted to the proper dimensions (often by inspection of children) at a
- later time.
+ If you do not enter latticeDimensions, a unit grid will be produced which must be adjusted
+ to the proper dimensions (often by inspection of children) at a later time.
"""
symmetry = (
geometry.SymmetryType.fromStr(self.symmetry) if self.symmetry else None
@@ -368,11 +361,12 @@ def expandToFull(self):
"""
Unfold the blueprints to represent full symmetry.
- .. note:: This relatively rudimentary, and copies entries from the
- currently-represented domain to their corresponding locations in full
- symmetry. This may not produce the desired behavior for some scenarios,
- such as when expanding fuel shuffling paths or the like. Future work may
- make this more sophisticated.
+ Notes
+ -----
+        This is relatively rudimentary, and copies entries from the currently-represented domain to
+ their corresponding locations in full symmetry. This may not produce the desired behavior
+ for some scenarios, such as when expanding fuel shuffling paths or the like. Future work may
+ make this more sophisticated.
"""
if (
geometry.SymmetryType.fromAny(self.symmetry).domain
@@ -422,8 +416,8 @@ def _readGridContents(self):
def _readGridContentsLattice(self):
"""Read an ascii map of grid contents.
- This update the gridContents attribute, which is a dict mapping grid i,j,k
- indices to textual specifiers (e.g. ``IC``))
+        This updates the gridContents attribute, which is a dict mapping grid i,j,k indices to
+        textual specifiers (e.g. ``IC``).
"""
self.readFromLatticeMap = True
symmetry = geometry.SymmetryType.fromStr(self.symmetry)
@@ -547,21 +541,21 @@ def saveToStream(stream, bluep, full=False, tryMap=False):
"""
Save the blueprints to the passed stream.
- This can save either the entire blueprints, or just the `grids:` section of the
- blueprints, based on the passed ``full`` argument. Saving just the grid
- blueprints can be useful when cobbling blueprints together with !include flags.
+ This can save either the entire blueprints, or just the `grids:` section of the blueprints,
+ based on the passed ``full`` argument. Saving just the grid blueprints can be useful when
+ cobbling blueprints together with !include flags.
.. impl:: Write a blueprint file from a blueprint object.
:id: I_ARMI_BP_TO_DB
:implements: R_ARMI_BP_TO_DB
- First makes a copy of the blueprints that are passed in. Then modifies
- any grids specified in the blueprints into a canonical lattice map style,
- if needed. Then uses the ``dump`` method that is inherent to all ``yamlize``
- subclasses to write the blueprints to the given ``stream`` object.
+ First makes a copy of the blueprints that are passed in. Then modifies any grids specified
+ in the blueprints into a canonical lattice map style, if needed. Then uses the ``dump``
+ method that is inherent to all ``yamlize`` subclasses to write the blueprints to the given
+ ``stream`` object.
- If called with the ``full`` argument, the entire blueprints is dumped.
- If not, only the grids portion is dumped.
+ If called with the ``full`` argument, the entire blueprints is dumped. If not, only the
+ grids portion is dumped.
Parameters
----------
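In sketch form, the two modes described above differ only in the ``full`` flag; ``bp`` is assumed to be an existing blueprints object and the file names are arbitrary:

>>> with open("grids-only.yaml", "w") as stream:
...     saveToStream(stream, bp, full=False)  # write just the `grids:` section
>>> with open("full-blueprints.yaml", "w") as stream:
...     saveToStream(stream, bp, full=True)   # write the entire blueprints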
diff --git a/armi/reactor/blueprints/reactorBlueprint.py b/armi/reactor/blueprints/reactorBlueprint.py
index 40e802cc57..317d7f4a2c 100644
--- a/armi/reactor/blueprints/reactorBlueprint.py
+++ b/armi/reactor/blueprints/reactorBlueprint.py
@@ -15,18 +15,17 @@
"""
Definitions of top-level reactor arrangements like the Core (default), SFP, etc.
-See documentation of blueprints in :doc:`/user/inputs/blueprints` for more context. See
-example in :py:mod:`armi.reactor.blueprints.tests.test_reactorBlueprints`.
+See documentation of blueprints in :doc:`/user/inputs/blueprints` for more context. See example in
+:py:mod:`armi.reactor.blueprints.tests.test_reactorBlueprints`.
-This was built to replace the old system that loaded the core geometry from the
-cs['geometry'] setting. Until the geom file-based input is completely removed, this
-system will attempt to migrate the core layout from geom files. When geom files are
-used, explicitly specifying a ``core`` system will result in an error.
+This was built to replace the old system that loaded the core geometry from the cs['geometry']
+setting. Until the geom file-based input is completely removed, this system will attempt to migrate
+the core layout from geom files. When geom files are used, explicitly specifying a ``core`` system
+will result in an error.
-System Blueprints are a big step in the right direction to generalize user input, but
-was still mostly adapted from the old Core layout input. As such, they still only really
-support Core-like systems. Future work should generalize the concept of "system" to more
-varied scenarios.
+System Blueprints are a big step in the right direction to generalize user input, but they were
+still mostly adapted from the old Core layout input. As such, they still only really support
+Core-like systems. Future work should generalize the concept of "system" to more varied scenarios.
See Also
--------
@@ -61,17 +60,20 @@ class SystemBlueprint(yamlize.Object):
which is in turn included into the overall blueprints within
:py:class:`~armi.reactor.blueprints.Blueprints`.
- This class includes a :py:meth:`~armi.reactor.blueprints.reactorBlueprint.SystemBlueprint.construct`
- method, which is typically called from within :py:func:`~armi.reactor.reactors.factory`
- during the initialization of the reactor object to instantiate the core
- and/or spent fuel pool objects. During that process, a spatial grid is
- constructed based on the grid blueprints specified in the "grids" section
- of the blueprints (see :need:`I_ARMI_BP_GRID`) and the assemblies needed
- to fill the lattice are built from blueprints using :py:meth:`~armi.reactor.blueprints.Blueprints.constructAssem`.
-
- .. note:: We use string keys to link grids to objects that use them. This differs
- from how blocks/assembies are specified, which use YAML anchors. YAML anchors
- have proven to be problematic and difficult to work with
+ This class includes a
+ :py:meth:`~armi.reactor.blueprints.reactorBlueprint.SystemBlueprint.construct` method, which
+ is typically called from within :py:func:`~armi.reactor.reactors.factory` during the
+ initialization of the reactor object to instantiate the core and/or spent fuel pool objects.
+ During that process, a spatial grid is constructed based on the grid blueprints specified in
+ the "grids" section of the blueprints (see :need:`I_ARMI_BP_GRID`) and the assemblies needed
+ to fill the lattice are built from blueprints using
+ :py:meth:`~armi.reactor.blueprints.Blueprints.constructAssem`.
+
+ Notes
+ -----
+    We use string keys to link grids to objects that use them. This differs from how
+    blocks/assemblies are specified, which use YAML anchors. YAML anchors have proven to be
+    problematic and difficult to work with.
"""
name = yamlize.Attribute(key="name", type=str)
diff --git a/armi/reactor/converters/geometryConverters.py b/armi/reactor/converters/geometryConverters.py
index ed1ba1011b..58755be4ba 100644
--- a/armi/reactor/converters/geometryConverters.py
+++ b/armi/reactor/converters/geometryConverters.py
@@ -1019,22 +1019,24 @@ def _writeRadialThetaZoneHeader(
)
)
runLog.debug(
- "{} Axial Zone - Axial Height (cm) Block Number Block Type XS ID : Original Hex Block XS ID(s)".format(
- 9 * STR_SPACE
- )
+ "{} Axial Zone - Axial Height (cm) Block Number Block Type XS ID : "
+ "Original Hex Block XS ID(s)".format(9 * STR_SPACE)
)
runLog.debug(
- "{} ---------- - ----------------- ------------ ---------------------- ----- : ---------------------------".format(
- 9 * STR_SPACE
- )
+ "{} ---------- - ----------------- ------------ ---------------------- ----- : "
+ "---------------------------".format(9 * STR_SPACE)
)
def _writeRadialThetaZoneInfo(self, axIdx, axialSegmentHeight, blockObj):
- """Create a summary of the mapping between the converted reactor block ids to the hex reactor block ids."""
+ """
+        Create a summary of the mapping from the converted reactor block ids to the hex reactor
+        block ids.
+ """
self._newBlockNum += 1
hexBlockXsIds = []
for hexBlock in self.blockMap[blockObj]:
hexBlockXsIds.append(hexBlock.getMicroSuffix())
+
runLog.debug(
"{} {:<10} - {:<17.3f} {:<12} {:<22} {:<5} : {}".format(
9 * STR_SPACE,
@@ -1061,14 +1063,13 @@ def plotConvertedReactor(self, fNameBase=None):
Parameters
----------
fNameBase : str, optional
- A name that will form the basis of the N plots that
- are generated by this method. Will get split on extension
- and have numbers added. Should be like ``coreMap.png``.
+ A name that will form the basis of the N plots that are generated by this method. Will
+ get split on extension and have numbers added. Should be like ``coreMap.png``.
Notes
-----
- XTView can be used to view the RZT reactor but this is useful to examine the
- conversion of the hex-z reactor to the rzt reactor.
+ XTView can be used to view the RZT reactor but this is useful to examine the conversion of
+ the hex-z reactor to the rzt reactor.
This makes plots of each individual theta mesh
"""
@@ -1147,6 +1148,7 @@ def plotConvertedReactor(self, fNameBase=None):
else:
figs.append(fig)
innerTheta = outerTheta
+
return figs
def _getReactorMeshCoordinates(self):
@@ -1275,19 +1277,16 @@ def convert(self, r):
:id: I_ARMI_THIRD_TO_FULL_CORE0
:implements: R_ARMI_THIRD_TO_FULL_CORE
- This method first checks if the input reactor is already full core.
- If full-core symmetry is detected, the input reactor is returned.
- If not, it then verifies that the input reactor has the expected one-third
- core symmetry and HEX geometry.
+ This method first checks if the input reactor is already full core. If full-core
+ symmetry is detected, the input reactor is returned. If not, it then verifies that the
+ input reactor has the expected one-third core symmetry and HEX geometry.
- Upon conversion, it loops over the assembly vector of the source
- one-third core model, copies and rotates each source assembly to create
- new assemblies, and adds them on the full-core grid. For the center assembly,
- it modifies its parameters.
+ Upon conversion, it loops over the assembly vector of the source one-third core model,
+ copies and rotates each source assembly to create new assemblies, and adds them on the
+ full-core grid. For the center assembly, it modifies its parameters.
Finally, it sets the domain type to full core.
-
Parameters
----------
sourceReactor : Reactor object
@@ -1380,10 +1379,10 @@ def restorePreviousGeometry(self, r=None):
:id: I_ARMI_THIRD_TO_FULL_CORE1
:implements: R_ARMI_THIRD_TO_FULL_CORE
- This method is a reverse process of the method ``convert``. It converts
- the full-core reactor model back to the original one-third core reactor model by removing
- the added assemblies and changing the parameters of the center
- assembly from full core to one third core.
+ This method is a reverse process of the method ``convert``. It converts the full-core
+ reactor model back to the original one-third core reactor model by removing the added
+ assemblies and changing the parameters of the center assembly from full core to one
+ third core.
"""
r = r or self._sourceReactor
@@ -1426,10 +1425,10 @@ def addEdgeAssemblies(self, core):
:id: I_ARMI_ADD_EDGE_ASSEMS0
:implements: R_ARMI_ADD_EDGE_ASSEMS
- Edge assemblies on the 120-degree symmetric line of a one-third core reactor model are added
- because they are needed for DIF3D-finite difference or MCNP models. This is done
- by copying the assemblies from the lower boundary and placing them in their
- reflective positions on the upper boundary of the symmetry line.
+ Edge assemblies on the 120-degree symmetric line of a one-third core reactor model are
+ added because they are needed for DIF3D-finite difference or MCNP models. This is done
+ by copying the assemblies from the lower boundary and placing them in their reflective
+ positions on the upper boundary of the symmetry line.
Parameters
----------
@@ -1494,16 +1493,15 @@ def removeEdgeAssemblies(self, core):
"""
Remove the edge assemblies in preparation for the nodal diffusion approximation.
- This makes use of the assemblies knowledge of if it is in a region that it
- needs to be removed.
+        This makes use of each assembly's knowledge of whether it is in a region from which it
+        needs to be removed.
.. impl:: Remove assemblies along the 120-degree line from a reactor.
:id: I_ARMI_ADD_EDGE_ASSEMS1
:implements: R_ARMI_ADD_EDGE_ASSEMS
- This method is the reverse process of the method ``addEdgeAssemblies``. It is
- needed for the DIF3D-Nodal calculation. It removes the assemblies on the 120-degree
- symmetry line.
+ This method is the reverse process of the method ``addEdgeAssemblies``. It is needed for
+ the DIF3D-Nodal calculation. It removes the assemblies on the 120-degree symmetry line.
See Also
--------
@@ -1515,8 +1513,8 @@ def removeEdgeAssemblies(self, core):
assembliesOnLowerBoundary = core.getAssembliesOnSymmetryLine(
grids.BOUNDARY_0_DEGREES
)
- # don't use newAssembliesAdded b/c this may be BOL cleaning of a fresh
- # case that has edge assems
+ # Don't use newAssembliesAdded b/c this may be BOL cleaning of a fresh case that has edge
+ # assems.
edgeAssemblies = core.getAssembliesOnSymmetryLine(grids.BOUNDARY_120_DEGREES)
for a in edgeAssemblies:
runLog.debug(
diff --git a/armi/reactor/converters/tests/test_axialExpansionChanger.py b/armi/reactor/converters/tests/test_axialExpansionChanger.py
index 49a2d7cfd2..5b22ba291c 100644
--- a/armi/reactor/converters/tests/test_axialExpansionChanger.py
+++ b/armi/reactor/converters/tests/test_axialExpansionChanger.py
@@ -864,7 +864,8 @@ def test_coldAssemblyExpansion(self):
Notes
-----
For R_ARMI_INP_COLD_HEIGHT, the action of axial expansion occurs in setUp() during core
- construction, specifically in :py:meth:`constructAssem `
+ construction, specifically in
+ :py:meth:`constructAssem `
Two assertions here:
1. total assembly height should be preserved (through use of top dummy block)
@@ -913,14 +914,15 @@ def test_coldAssemblyExpansion(self):
def checkColdHeightBlockMass(
self, bStd: HexBlock, bExp: HexBlock, flagType: Flags, nuclide: str
):
- """Checks that nuclide masses for blocks with input cold heights and "inputHeightsConsideredHot": True are underpredicted.
+ """Checks that nuclide masses for blocks with input cold heights and
+ "inputHeightsConsideredHot": True are underpredicted.
Notes
-----
- If blueprints have cold blocks heights with "inputHeightsConsideredHot": True in the inputs, then
- the nuclide densities are thermally expanded but the block height is not. This ultimately results in
- nuclide masses being underpredicted relative to the case where both nuclide densities and block heights
- are thermally expanded.
+ If blueprints have cold blocks heights with "inputHeightsConsideredHot": True in the inputs,
+ then the nuclide densities are thermally expanded but the block height is not. This
+ ultimately results in nuclide masses being underpredicted relative to the case where both
+ nuclide densities and block heights are thermally expanded.
"""
# custom materials don't expand
if not isinstance(bStd.getComponent(flagType).material, custom.Custom):
@@ -976,7 +978,8 @@ def runTest(
Notes
-----
- components "typeA" and "typeB" are assumed to be vertically stacked
- - two assertions: 1) comparing "typeB" component to "typeA"; 2) comparing "typeA" component to "typeB"
+ - two assertions: 1) comparing "typeB" component to "typeA"; 2) comparing "typeA" component
+ to "typeB"
- the different assertions are particularly useful for comparing two annuli
- to add Component class types to a test:
Add dictionary entry with following:
diff --git a/armi/reactor/converters/tests/test_geometryConverters.py b/armi/reactor/converters/tests/test_geometryConverters.py
index 0e31b84c8e..7863f96bfe 100644
--- a/armi/reactor/converters/tests/test_geometryConverters.py
+++ b/armi/reactor/converters/tests/test_geometryConverters.py
@@ -39,7 +39,7 @@ def setUp(self):
self.cs = self.o.cs
def test_addRing(self):
- """Tests that the addRing method adds the correct number of fuel assemblies to the test reactor."""
+ """Tests that ``addRing`` adds the correct number of fuel assemblies to the test reactor."""
converter = geometryConverters.FuelAssemNumModifier(self.cs)
converter.numFuelAssems = 7
converter.ringsToAdd = 1 * ["radial shield"]
@@ -48,7 +48,7 @@ def test_addRing(self):
numAssems = len(self.r.core.getAssemblies())
self.assertEqual(
numAssems, 13
- ) # should wind up with 6 reflector assemblies per 1/3rd core
+    ) # should end up with 6 reflector assemblies per 1/3rd core
locator = self.r.core.spatialGrid.getLocatorFromRingAndPos(4, 1)
shieldtype = self.r.core.childrenByLocator[locator].getType()
self.assertEqual(
@@ -64,7 +64,7 @@ def test_addRing(self):
) # should wind up with 11 reflector assemblies per 1/3rd core
def test_setNumberOfFuelAssems(self):
- """Tests that the setNumberOfFuelAssems method properly changes the number of fuel assemblies."""
+ """Tests that ``setNumberOfFuelAssems~`` properly changes the number of fuel assemblies."""
# tests ability to add fuel assemblies
converter = geometryConverters.FuelAssemNumModifier(self.cs)
converter.numFuelAssems = 60
@@ -450,12 +450,12 @@ def test_initNewFullReactor(self):
self.assertEqual(newR.core.symmetry.domain, geometry.DomainType.FULL_CORE)
def test_skipGrowToFullCoreWhenAlreadyFullCore(self):
- """Test that hex core is not modified when third core to full core changer is called on an already full core geometry.
+ """Test that hex core is not modified when third core to full core changer is called on an
+ already full core geometry.
.. test: Convert a one-third core to full core and restore back to one-third core.
:id: T_ARMI_THIRD_TO_FULL_CORE2
:tests: R_ARMI_THIRD_TO_FULL_CORE
-
"""
# Check the initialization of the third core model and convert to a full core
self.assertFalse(self.r.core.isFullCore)
diff --git a/armi/reactor/parameters/parameterDefinitions.py b/armi/reactor/parameters/parameterDefinitions.py
index a1d6c3d884..f7e7153062 100644
--- a/armi/reactor/parameters/parameterDefinitions.py
+++ b/armi/reactor/parameters/parameterDefinitions.py
@@ -15,12 +15,11 @@
r"""
This module contains the code necessary to represent parameter definitions.
-``ParameterDefinition``\ s are the metadata that describe specific parameters, and aid in
-enforcing certain rules upon the parameters themselves and the parameter collections
-that contain them.
+``ParameterDefinition``\ s are the metadata that describe specific parameters, and aid in enforcing
+certain rules upon the parameters themselves and the parameter collections that contain them.
-This module also describes the ``ParameterDefinitionCollection`` class, which serves as
-a specialized container to manage related parameter definitions.
+This module also describes the ``ParameterDefinitionCollection`` class, which serves as a
+specialized container to manage related parameter definitions.
See Also
--------
@@ -42,8 +41,7 @@
# Note that the various operations are responsible for clearing the flags on the events.
# These should be interpreted as:
# The Parameter or ParameterCollection has been modified SINCE_
-# In order for that to happen, the flags need to be cleared when the
-# begins.
+# In order for that to happen, the flags need to be cleared when the relevant operation begins.
SINCE_INITIALIZATION = 1
SINCE_LAST_DISTRIBUTE_STATE = 4
SINCE_LAST_GEOMETRY_TRANSFORMATION = 8
@@ -69,9 +67,12 @@ class Category:
* `fluxQuantities` parameters are related to neutron or gamma flux
* `neutronics` parameters are calculated in a neutronics global flux solve
* `gamma` parameters are calculated in a fixed-source gamma solve
- * `detailedAxialExpansion` parameters are marked as such so that they are mapped from the uniform mesh back to the non-uniform mesh
- * `reactivity coefficients` parameters are related to reactivity coefficient or kinetics parameters for kinetics solutions
- * `thermal hydraulics` parameters come from a thermal hydraulics physics plugin (e.g., flow rates, temperatures, etc.)
+ * `detailedAxialExpansion` parameters are marked as such so that they are mapped from the
+ uniform mesh back to the non-uniform mesh
+ * `reactivity coefficients` parameters are related to reactivity coefficient or kinetics
+ parameters for kinetics solutions
+ * `thermal hydraulics` parameters come from a thermal hydraulics physics plugin (e.g., flow
+ rates, temperatures, etc.)
"""
depletion = "depletion"
@@ -104,7 +105,9 @@ class ParamLocation(enum.Flag):
class NoDefault:
- """Class used to allow distinction between not setting a default and setting a default of ``None``."""
+ """Class used to allow distinction between not setting a default and setting a default of
+ ``None``.
+ """
def __init__(self):
raise NotImplementedError("You cannot create an instance of NoDefault")
@@ -121,13 +124,12 @@ class Serializer:
r"""
Abstract class describing serialize/deserialize operations for Parameter data.
- Parameters need to be stored to and read from database files. This currently
- requires that the Parameter data be converted to a numpy array of a datatype
- supported by the ``h5py`` package. Some parameters may contain data that are not
- trivially representable in numpy/HDF5, and need special treatment. Subclassing
- ``Serializer`` and setting it as a ``Parameter``\ s ``serializer`` allows for special
- operations to be performed on the parameter values as they are stored to the
- database or read back in.
+ Parameters need to be stored to and read from database files. This currently requires that the
+ Parameter data be converted to a numpy array of a datatype supported by the ``h5py`` package.
+ Some parameters may contain data that are not trivially representable in numpy/HDF5, and need
+ special treatment. Subclassing ``Serializer`` and setting it as a ``Parameter``\ s
+ ``serializer`` allows for special operations to be performed on the parameter values as they are
+ stored to the database or read back in.
The ``Database3`` already knows how to handle certain cases where the data are not
straightforward to get into a numpy array, such as when:
@@ -137,35 +139,33 @@ class Serializer:
- The dimensions of the values stored on each object are inconsistent (e.g.,
"jagged" arrays)
- So, in these cases, a Serializer is not needed. Serializers are necessary for when
- the actual data need to be converted to a native data type (e.g., int, float, etc.)
- For example, we use a Serializer to handle writing ``Flags`` to the Database, as
- they tend to be too big to fit into a system-native integer.
+ So, in these cases, a Serializer is not needed. Serializers are necessary for when the actual
+    data need to be converted to a native data type (e.g., int, float, etc.). For example, we use a
+ Serializer to handle writing ``Flags`` to the Database, as they tend to be too big to fit into a
+ system-native integer.
.. important::
- Defining a Serializer for a Parameter in part defines the underlying
- representation of the data within a database file; the data stored in a database
- are sensitive to the code that wrote them. Changing the method that a Serializer
- uses to pack or unpack data may break compatibility with old database files.
- Therefore, Serializers should be diligent about signalling changes by updating
- their version. It is also good practice, whenever possible, to support reading
- old versions so that database files written by old versions can still be read.
+ Defining a Serializer for a Parameter in part defines the underlying representation of the
+ data within a database file; the data stored in a database are sensitive to the code that
+ wrote them. Changing the method that a Serializer uses to pack or unpack data may break
+ compatibility with old database files. Therefore, Serializers should be diligent about
+ signalling changes by updating their version. It is also good practice, whenever possible,
+ to support reading old versions so that database files written by old versions can still be
+ read.
.. impl:: Users can define custom parameter serializers.
:id: I_ARMI_PARAM_SERIALIZE
:implements: R_ARMI_PARAM_SERIALIZE
- Important physical parameters are stored in every ARMI object.
- These parameters represent the plant's state during execution
- of the model. Currently, this requires that the parameters be serializable to a
- numpy array of a datatype supported by the ``h5py`` package so that the data can
- be written to, and subsequently read from, an HDF5 file.
+ Important physical parameters are stored in every ARMI object. These parameters represent
+ the plant's state during execution of the model. Currently, this requires that the
+ parameters be serializable to a numpy array of a datatype supported by the ``h5py`` package
+ so that the data can be written to, and subsequently read from, an HDF5 file.
- This class allows for these parameters to be serialized in a custom manner by
- providing interfaces for packing and unpacking parameter data. The user or
- downstream plugin is able to specify how data is serialized if that data is not
- naturally serializable.
+ This class allows for these parameters to be serialized in a custom manner by providing
+ interfaces for packing and unpacking parameter data. The user or downstream plugin is able
+ to specify how data is serialized if that data is not naturally serializable.
See Also
--------
@@ -174,20 +174,19 @@ class Serializer:
armi.reactor.flags.FlagSerializer
"""
- # This will accompany the packed data as an attribute when written, and will be
- # provided to the unpack() method when reading. If the underlying format of the data
- # changes, make sure to change this.
+ # This will accompany the packed data as an attribute when written, and will be provided to the
+ # unpack() method when reading. If the underlying format of the data changes, make sure to
+ # change this.
version: Optional[str] = None
@staticmethod
def pack(data: Sequence[any]) -> Tuple[numpy.ndarray, Dict[str, any]]:
"""
- Given unpacked data, return packed data and a dictionary of attributes needed to
- unpack it.
+ Given unpacked data, return packed data and a dictionary of attributes needed to unpack it.
- This should perform the fundamental packing operation, returning the packed data
- and any metadata ("attributes") that would be necessary to unpack the data. The
- class's version is always stored, so no need to provide it as an attribute.
+ This should perform the fundamental packing operation, returning the packed data and any
+ metadata ("attributes") that would be necessary to unpack the data. The class's version is
+ always stored, so no need to provide it as an attribute.
See Also
--------
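A minimal sketch of the pack/unpack contract described above, for a hypothetical parameter whose per-object values are sets of ints; this is not an ARMI-provided serializer, and the ``unpack`` signature shown is an assumption based on the description above:

>>> import numpy
>>> class SetSerializer(Serializer):
...     version = "1"
...     @staticmethod
...     def pack(data):
...         # Flatten the sets into one array; remember each set's length so it can be rebuilt.
...         lengths = [len(s) for s in data]
...         flat = numpy.array([v for s in data for v in sorted(s)])
...         return flat, {"lengths": lengths}
...     @classmethod
...     def unpack(cls, data, version, attrs):  # assumed signature: packed data, version, attributes
...         out, start = [], 0
...         for n in attrs["lengths"]:
...             out.append(set(int(v) for v in data[start:start + n]))
...             start += n
...         return out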
@@ -232,11 +231,11 @@ class Parameter:
_validName = re.compile("^[a-zA-Z0-9_]+$")
- # Using slots because Parameters are pretty static and mostly POD. __slots__ make
- # this official, and offer some performance benefits in memory (not too important;
- # there aren't that many instances of Parameter to begin with) and attribute access
- # time (more important, since we need to go through Parameter objects to get to a
- # specific parameter's value in a ParameterCollection)
+ # Using slots because Parameters are pretty static and mostly POD. __slots__ make this official,
+ # and offer some performance benefits in memory (not too important; there aren't that many
+ # instances of Parameter to begin with) and attribute access time (more important, since we need
+ # to go through Parameter objects to get to a specific parameter's value in a
+ # ParameterCollection)
__slots__ = (
"name",
"fieldName",
@@ -300,8 +299,8 @@ def paramGetter(p_self):
value = getattr(p_self, self.fieldName)
if value is NoDefault:
raise ParameterError(
- "Cannot get value for parameter `{}` in `{}` as no default has "
- "been defined, and no value has been assigned.".format(
+ "Cannot get value for parameter `{}` in `{}` as no default has been "
+ "defined, and no value has been assigned.".format(
self.name, type(p_self)
)
)
@@ -342,9 +341,9 @@ def __get__(self, obj, cls=None):
Notes
-----
- We do not check to see if ``cls != None``. This is an optimization choice, that
- someone may deem unnecessary. As a result, unlike Python's ``property`` class, a
- subclass cannot override the getter method.
+        We do not check to see if ``cls != None``. This is an optimization choice that someone may
+ deem unnecessary. As a result, unlike Python's ``property`` class, a subclass cannot
+ override the getter method.
"""
return self._getter(obj)
@@ -355,18 +354,17 @@ def setter(self, setter):
:id: I_ARMI_PARAM_PARALLEL
:implements: R_ARMI_PARAM_PARALLEL
- Parameters need to be handled properly during parallel code execution. This
- includes notifying processes if a parameter has been updated by
- another process. This method allows for setting a parameter's value as well
- as an attribute that signals whether this parameter has been updated. Future
- processes will be able to query this attribute so that the parameter's
- status is properly communicated.
+ Parameters need to be handled properly during parallel code execution. This includes
+ notifying processes if a parameter has been updated by another process. This method
+ allows for setting a parameter's value as well as an attribute that signals whether this
+ parameter has been updated. Future processes will be able to query this attribute so
+ that the parameter's status is properly communicated.
Notes
-----
- Unlike the traditional Python ``property`` class, this does not return a new
- instance of a ``Parameter``; therefore it cannot be reassigned in the same way
- that a Python ``property`` can be.
+ Unlike the traditional Python ``property`` class, this does not return a new instance of a
+ ``Parameter``; therefore it cannot be reassigned in the same way that a Python ``property``
+ can be.
Examples
--------
@@ -435,14 +433,13 @@ class ParameterDefinitionCollection:
Notes
-----
- ``_representedTypes`` is used to detect if this ``ParameterDefinitionCollection``
- contains definitions for only one type. If the collection only exists for 1 type,
- the lookup (``__getitem__``) can short circuit O(n) logic for O(1) dictionary
- lookup.
+ ``_representedTypes`` is used to detect if this ``ParameterDefinitionCollection`` contains
+ definitions for only one type. If the collection only exists for 1 type, the lookup
+ (``__getitem__``) can short circuit O(n) logic for O(1) dictionary lookup.
"""
- # Slots are not being used here as an attempt at optimization. Rather, they serve to
- # add some needed rigidity to the parameter system.
+ # Slots are not being used here as an attempt at optimization. Rather, they serve to add some
+ # needed rigidity to the parameter system.
__slots__ = ("_paramDefs", "_paramDefDict", "_representedTypes", "_locked")
def __init__(self):
@@ -462,15 +459,15 @@ def __getitem__(self, name):
Notes
-----
- This method might break if the collection is for multiple composite types, and
- there exists a parameter with the same name in multiple types.
+ This method might break if the collection is for multiple composite types, and there exists
+ a parameter with the same name in multiple types.
"""
# O(1) lookup if there is only 1 type, could still raise a KeyError
if len(self._representedTypes) == 1:
return self._paramDefDict[name, next(iter(self._representedTypes))]
- # "matches" only checks for the same name, while the add method checks both name
- # and collectionType
+ # "matches" only checks for the same name, while the add method checks both name and
+ # collectionType
matches = [pd for pd in self if pd.name == name]
if len(matches) != 1:
raise KeyError(
@@ -507,8 +504,8 @@ def extend(self, other):
assert self is not other
if other is None:
raise ValueError(
- f"Cannot extend {self} with `None`. "
- "Ensure return value of parameter definitions returns something."
+ f"Cannot extend {self} with `None`. Ensure the parameter definitions function "
+ "returns something."
)
for pd in other:
self.add(pd)
@@ -524,8 +521,8 @@ def atLocation(self, paramLoc):
"""
Make a param definition collection with all defs defined at a specific location.
- Parameters can be defined at various locations within their container
- based on :py:class:`ParamLocation`. This allows selection by those values.
+ Parameters can be defined at various locations within their container based on
+ :py:class:`ParamLocation`. This allows selection by those values.
"""
return self._filter(lambda pd: pd.atLocation(paramLoc))
@@ -605,15 +602,15 @@ def toWriteToDB(self, assignedMask: Optional[int] = None):
:id: I_ARMI_PARAM_DB
:implements: R_ARMI_PARAM_DB
- This method is called when writing the parameters to the database file. It
- queries the parameter's ``saveToDB`` attribute to ensure that this parameter
- is desired for saving to the database file. It returns a list of parameters
- that should be included in the database write operation.
+ This method is called when writing the parameters to the database file. It queries the
+ parameter's ``saveToDB`` attribute to ensure that this parameter is desired for saving
+ to the database file. It returns a list of parameters that should be included in the
+ database write operation.
Parameters
----------
assignedMask : int
- a bitmask to down-filter which params to use based on how "stale" they are.
+ A bitmask to down-filter which params to use based on how "stale" they are.
"""
mask = assignedMask or SINCE_ANYTHING
return [p for p in self if p.saveToDB and p.assigned & mask]
@@ -623,8 +620,8 @@ def createBuilder(self, *args, **kwargs):
Create an associated object that can create definitions into this collection.
Using the returned ParameterBuilder will add all defined parameters to this
- ParameterDefinitionCollection, using the passed arguments as defaults. Arguments
- should be valid arguments to ``ParameterBuilder.__init__()``
+ ParameterDefinitionCollection, using the passed arguments as defaults. Arguments should be
+ valid arguments to ``ParameterBuilder.__init__()``
"""
paramBuilder = ParameterBuilder(*args, **kwargs)
paramBuilder.associateParameterDefinitionCollection(self)
@@ -675,9 +672,9 @@ def associateParameterDefinitionCollection(self, paramDefs):
"""
Associate this parameter factory with a specific ParameterDefinitionCollection.
- Subsequent calls to defParam will automatically add the created
- ParameterDefinitions to this ParameterDefinitionCollection. This results in a
- cleaner syntax when defining many ParameterDefinitions.
+ Subsequent calls to defParam will automatically add the created ParameterDefinitions to this
+ ParameterDefinitionCollection. This results in a cleaner syntax when defining many
+ ParameterDefinitions.
"""
self._paramDefs = paramDefs
@@ -737,10 +734,9 @@ def defParam(
Notes
-----
- It is not possible to initialize the parameter on the class this method would be
- used on, because there is no instance (i.e. self) when this method is run.
- However, this method could access a globally available set of definitions, if
- one existed.
+ It is not possible to initialize the parameter on the class this method would be used on,
+ because there is no instance (i.e. self) when this method is run. However, this method could
+ access a globally available set of definitions, if one existed.
"""
self._assertDefaultIsProperType(default)
if location is None and self._defaultLocation is None:
@@ -766,7 +762,6 @@ def defParam(
return paramDef
-# Container for all parameter definition collections that have been bound to an
-# ArmiObject or subclass. These are added from the applyParameters() method on
-# the ParameterCollection class.
+# Container for all parameter definition collections that have been bound to an ArmiObject or
+# subclass. These are added from the applyParameters() method on the ParameterCollection class.
ALL_DEFINITIONS = ParameterDefinitionCollection()
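A short sketch of how a plugin typically uses createBuilder()/defParam(), mirroring the reactorParameters.py hunks later in this diff. The parameter name is hypothetical, the context-manager form of createBuilder() and the _p_-prefixed backing field are assumptions, and the custom setter follows the style of setter=isNumpyArray(...) seen below.

    from armi.reactor import parameters
    from armi.utils import units

    pDefs = parameters.ParameterDefinitionCollection()


    def _setWastageThickness(self, value):
        # Reject unphysical values before storing; the wrapper handles the "assigned" bookkeeping.
        if value < 0.0:
            raise ValueError("wastageThickness cannot be negative.")
        self._p_wastageThickness = value  # backing-field name is an assumption


    with pDefs.createBuilder(default=0.0) as pb:
        pb.defParam(
            "wastageThickness",
            units=units.MICRONS,
            description="Hypothetical cladding wastage thickness, used here only as an example.",
            setter=_setWastageThickness,
            saveToDB=True,
        )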
diff --git a/armi/reactor/reactorParameters.py b/armi/reactor/reactorParameters.py
index cc9fe34541..270faded91 100644
--- a/armi/reactor/reactorParameters.py
+++ b/armi/reactor/reactorParameters.py
@@ -127,8 +127,9 @@ def defineCoreParameters():
"detailedNucKeys",
setter=isNumpyArray("detailedNucKeys"),
units=units.UNITLESS,
- description="""Nuclide vector keys, used to map densities in b.p.detailedNDens and a.p.detailedNDens.
- ZZZAAA (ZZZ atomic number, AAA mass number, + 100 * m for metastable states.""",
+ description="""Nuclide vector keys, used to map densities in b.p.detailedNDens and
+ a.p.detailedNDens. ZZZAAA (ZZZ atomic number, AAA mass number, + 100 * m for metastable
+ states).""",
saveToDB=True,
default=None,
)
@@ -139,8 +140,8 @@ def defineCoreParameters():
"orientation",
units=units.DEGREES,
description=(
- "Triple representing rotations counterclockwise around each spatial axis. For example, "
- "a hex assembly rotated by 1/6th has orientation (0,0,60.0)"
+ "Triple representing rotations counterclockwise around each spatial axis. For "
+ "example, a hex assembly rotated by 1/6th has orientation (0,0,60.0)"
),
default=None,
)
@@ -181,14 +182,20 @@ def defineCoreParameters():
default=0.0,
units=units.PCM,
saveToDB=True,
- description="Worth requirement for the primary control rods in the reactor core to achieve safe shutdown.",
+ description=(
+ "Worth requirement for the primary control rods in the reactor core to "
+ "achieve safe shutdown."
+ ),
)
pb.defParam(
"crWorthRequiredSecondary",
default=0.0,
units=units.PCM,
saveToDB=True,
- description="Worth requirement for the secondary control rods in the reactor core to achieve safe shutdown.",
+ description=(
+ "Worth requirement for the secondary control rods in the reactor core to "
+ "achieve safe shutdown."
+ ),
)
pb.defParam(
"crTransientOverpowerWorth",
@@ -196,8 +203,8 @@ def defineCoreParameters():
units=units.PCM,
saveToDB=True,
description=(
- "Reactivity worth introduced by removal of the highest worth primary "
- "control rod from the core, starting from its critical position"
+ "Reactivity worth introduced by removal of the highest worth primary control rod "
+ "from the core, starting from its critical position"
),
)
@@ -216,7 +223,10 @@ def defineCoreParameters():
pb.defParam(
"referenceBlockAxialMesh",
units=units.CM,
- description="The axial block boundaries that assemblies should conform to in a uniform mesh case.",
+ description=(
+ "The axial block boundaries that assemblies should conform to in a "
+ "uniform mesh case."
+ ),
default=None,
)
@@ -229,8 +239,8 @@ def defineCoreParameters():
pb.defParam(
"doublingTime",
units=units.YEARS,
- description="""The time it takes to produce enough spent fuel to fuel a daughter reactor,
- in effective number of years at full power.""",
+ description="""The time it takes to produce enough spent fuel to fuel a daughter
+ reactor, in effective number of years at full power.""",
)
pb.defParam(
@@ -272,8 +282,9 @@ def defineCoreParameters():
pb.defParam(
"maxcladFCCI",
units=units.MICRONS,
- description="The core wide maximum amount of cladding wastage due to fuel chemical clad interaction calculated "
- + "at the 0-sigma TH HCF temperatures and using the conservative FCCI model",
+ description="The core wide maximum amount of cladding wastage due to fuel chemical "
+ + "clad interaction calculated at the 0-sigma TH HCF temperatures and using the "
+ + "conservative FCCI model",
default=0.0,
)
@@ -340,7 +351,8 @@ def defineCoreParameters():
pb.defParam(
"THmaxDeltaPPump",
units=units.PASCALS,
- description="The maximum pumping pressure rise required to pump the given mass flow rate through the rod bundle",
+ description="The maximum pumping pressure rise required to pump the given mass flow "
+ + "rate through the rod bundle",
)
pb.defParam(
@@ -516,8 +528,8 @@ def defineCoreParameters():
"jumpRing",
units=units.UNITLESS,
description=(
- "Radial ring number where bred-up fuel assemblies shuffle jump from the low power to the "
- "high power region."
+ "Radial ring number where bred-up fuel assemblies shuffle jump from the low power "
+ "to the high power region."
),
)
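The long description strings in this file are wrapped three ways: parenthesized adjacent literals, explicit "+" concatenation, and triple-quoted strings. The first two are equivalent (adjacent literals are joined at compile time), while triple-quoted strings keep their embedded newlines and indentation. A standalone check using one of the strings above:

    implicit = (
        "Worth requirement for the primary control rods in the reactor core to "
        "achieve safe shutdown."
    )
    explicit = (
        "Worth requirement for the primary control rods in the reactor core to "
        + "achieve safe shutdown."
    )
    assert implicit == explicit  # no runtime cost either way; the literals are merged when compiled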
diff --git a/armi/reactor/reactors.py b/armi/reactor/reactors.py
index e18ac04e35..2c56202c65 100644
--- a/armi/reactor/reactors.py
+++ b/armi/reactor/reactors.py
@@ -13,15 +13,12 @@
# limitations under the License.
"""
-Reactor objects represent the highest level in the hierarchy of
-structures that compose the system to be modeled. Core objects
-represent collections of assemblies.
-
-Core is a high-level object in the data model in ARMI. They
-contain assemblies which in turn contain more refinement in
-representing the physical reactor. The reactor is the owner of
-many of the plant-wide state variables such as keff, cycle,
-and node.
+Reactor objects represent the highest level in the hierarchy of structures that compose the system
+to be modeled. Core objects represent collections of assemblies.
+
+Core is a high-level object in the data model in ARMI. It contains assemblies which in turn
+contain more refinement in representing the physical reactor. The reactor is the owner of many of
+the plant-wide state variables such as keff, cycle, and node.
"""
from typing import Optional
import collections
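A brief navigation sketch for the hierarchy the docstring describes, assuming an initialized reactor object r (for instance from a test helper such as loadTestReactor); the parameter names come from the docstring itself.

    r.p.cycle        # plant-wide state (cycle, node) lives on the Reactor
    r.core.p.keff    # core-level state such as keff lives on the Core
    for assembly in r.core:      # a Core is a collection of assemblies
        for block in assembly:   # which in turn contain blocks, then components
            pass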
@@ -67,8 +64,7 @@
class Reactor(composites.Composite):
"""
- Top level of the composite structure, potentially representing all
- components in a reactor.
+ Top level of the composite structure, potentially representing all components in a reactor.
This class contains the core and any ex-core structures that are to be represented in the ARMI
model. Historically, the ``Reactor`` contained only the core. To support better representation
@@ -886,14 +882,14 @@ def countBlocksWithFlags(self, blockTypeSpec, assemTypeSpec=None):
The types of blocks to be counted in a single assembly
assemTypeSpec : Flags or list of Flags
- The types of assemblies that are to be examine for the blockTypes
- of interest. None is every assembly
+ The types of assemblies that are to be examined for the blockTypes of interest. None means
+ every assembly.
Returns
-------
maxBlocks : int
- The maximum number of blocks of the specified types in a single
- assembly in the entire core
+ The maximum number of blocks of the specified types in a single assembly in the entire
+ core.
"""
assems = self.getAssemblies(typeSpec=assemTypeSpec)
try:
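A hedged usage sketch for countBlocksWithFlags, assuming an initialized reactor r; the signature comes from the docstring above, while the specific Flags members are illustrative.

    from armi.reactor.flags import Flags

    # Largest number of fuel blocks found in any single fuel assembly:
    maxFuelBlocks = r.core.countBlocksWithFlags(Flags.FUEL, assemTypeSpec=Flags.FUEL)
    # With assemTypeSpec left as None, every assembly in the core is examined:
    maxPlenumBlocks = r.core.countBlocksWithFlags(Flags.PLENUM)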
@@ -1018,8 +1014,8 @@ def getAssembliesInSquareOrHexRing(
self, ring, typeSpec=None, exactType=False, exclusions=None
):
"""
- Returns the assemblies in a specified ring. Definitions of rings can change
- with problem parameters.
+ Returns the assemblies in a specified ring. Definitions of rings can change with problem
+ parameters.
Parameters
----------
@@ -1066,9 +1062,8 @@ def getAssembliesInCircularRing(
self, ring, typeSpec=None, exactType=False, exclusions=None
):
"""
- Gets an assemblies within a circular range of the center of the core. This
- function allows for more circular styled assembly shuffling instead of the
- current hex approach.
+ Gets assemblies within a circular range of the center of the core. This function allows
+ for more circular-styled assembly shuffling instead of the current hex approach.
Parameters
----------
@@ -1120,7 +1115,8 @@ def getAssembliesInCircularRing(
def buildCircularRingDictionary(self, ringPitch=1.0):
"""
- Builds a dictionary of all circular rings in the core. This is required information for getAssembliesInCircularRing.
+ Builds a dictionary of all circular rings in the core. This is required information for
+ getAssembliesInCircularRing.
The purpose of this function is to allow for more circular core shuffling in the hex design.
@@ -1160,12 +1156,12 @@ def _getAssembliesByName(self):
for assem in self.getAssemblies(includeBolAssems=True, includeSFP=True):
aName = assem.getName()
if aName in assymap and assymap[aName] != assem:
- # dangerous situation that can occur in restart runs where the global assemNum isn't updated.
- # !=assem clause added because sometimes an assem is in one of the includeAll lists that is also in the
- # core and that's ok.
+ # dangerous situation that can occur in restart runs where the global assemNum isn't
+ # updated. The != assem clause is needed because sometimes an assem that is in one of
+ # the includeAll lists is also in the core, and that's ok.
runLog.error(
- "Two (or more) assemblies in the reactor (and associated lists) have the name {0},\n"
- "including {1} and {2}.".format(aName, assem, assymap[aName])
+ "Two (or more) assemblies in the reactor (and associated lists) have the name "
+ "{0},\nincluding {1} and {2}.".format(aName, assem, assymap[aName])
)
raise RuntimeError("Assembly name collision.")
@@ -1179,8 +1175,8 @@ def getAssemblyByName(self, name):
:id: I_ARMI_R_GET_ASSEM_NAME
:implements: R_ARMI_R_GET_ASSEM_NAME
- This method returns the :py:class:`assembly
- ` with a name matching the
+ This method returns the :py:class:`assembly `
+ with a name matching the
value provided as an input parameter to this function. The ``name`` of
an assembly is based on the ``assemNum`` parameter.
@@ -1814,7 +1810,7 @@ def findNeighbors(
This uses the 'mcnp' index map (MCNP GEODST hex coordinates) instead of
the standard (ring, pos) map. because neighbors have consistent indices
- this way. We then convert over to (ring, pos) using the lookup table
+ this way. We then convert over to (ring, pos) using the lookup table
that a reactor has.
Returns
@@ -2218,12 +2214,11 @@ def findAllRadMeshPoints(self, extraAssems=None, applySubMesh=True):
Parameters
----------
extraAssems : list
- additional assemblies to consider when determining the mesh points. They may
- be useful in the MCPNXT models to represent the fuel management dummies.
+ additional assemblies to consider when determining the mesh points. They may be useful
+ in the MCPNXT models to represent the fuel management dummies.
applySubMesh : bool
- (not implemented) generates submesh points to further discretize the radial
- reactor mesh
+ (not implemented) generates submesh points to further discretize the radial reactor mesh
"""
_, j, _ = self.findAllMeshPoints(extraAssems, applySubMesh)
return j
@@ -2252,9 +2247,8 @@ def getMaxNumPins(self):
def getMinimumPercentFluxInFuel(self, target=0.005):
"""
- Goes through the entire reactor to determine what percentage of flux occurs at
- each ring. Starting with the outer ring, this function helps determine the effective
- size of the core where additional assemblies will not help the breeding in the TWR.
+ Starting with the outer ring, this method goes through the entire Reactor to determine what
+ percentage of flux occurs at each ring.
Parameters
----------
diff --git a/armi/reactor/tests/test_blocks.py b/armi/reactor/tests/test_blocks.py
index 257a2d4009..895c31cfde 100644
--- a/armi/reactor/tests/test_blocks.py
+++ b/armi/reactor/tests/test_blocks.py
@@ -1617,8 +1617,9 @@ def test_consistentMassDensityVolumeBetweenColdBlockAndColdComponents(self):
)
for expected, actual in zip(expectedData, actualData):
- msg = "Data (component, density, volume, mass) for component {} does not match. Expected: {}, Actual: {}".format(
- expected[0], expected, actual
+ msg = (
+ "Data (component, density, volume, mass) for component {} does not match. "
+ "Expected: {}, Actual: {}".format(expected[0], expected, actual)
)
for expectedVal, actualVal in zip(expected, actual):
self.assertAlmostEqual(expectedVal, actualVal, msg=msg)
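The rewrapped message works because adjacent string literals are concatenated before the trailing .format() call is applied, so the placeholder in the first fragment is still filled. A standalone check of that behavior:

    msg = (
        "Data for component {} does not match. "
        "Expected: {}, Actual: {}".format("clad", (1.0, 2.0), (1.0, 3.0))
    )
    assert msg == "Data for component clad does not match. Expected: (1.0, 2.0), Actual: (1.0, 3.0)"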
@@ -1634,8 +1635,9 @@ def test_consistentMassDensityVolumeBetweenHotBlockAndHotComponents(self):
)
for expected, actual in zip(expectedData, actualData):
- msg = "Data (component, density, volume, mass) for component {} does not match. Expected: {}, Actual: {}".format(
- expected[0], expected, actual
+ msg = (
+ "Data (component, density, volume, mass) for component {} does not match. "
+ "Expected: {}, Actual: {}".format(expected[0], expected, actual)
)
for expectedVal, actualVal in zip(expected, actual):
self.assertAlmostEqual(expectedVal, actualVal, msg=msg)
@@ -1646,11 +1648,11 @@ def test_consistentAreaWithOverlappingComponents(self):
Notes
-----
- This test calculates a reference coolant area by subtracting the areas of the intercoolant, duct, wire wrap,
- and pins from the total hex block area.
- The area of the pins is calculated using only the outer radius of the clad.
- This avoids the use of negative areas as implemented in Block.getVolumeFractions.
- Na-23 mass will not be conserved as when duct/clad expands sodium is evacuated
+ This test calculates a reference coolant area by subtracting the areas of the intercoolant,
+ duct, wire wrap, and pins from the total hex block area. The area of the pins is calculated
+ using only the outer radius of the clad. This avoids the use of negative areas as
+ implemented in Block.getVolumeFractions. Na-23 mass will not be conserved because sodium is
+ evacuated when the duct/clad expands.
See Also
--------
@@ -1782,7 +1784,7 @@ def test_getReactionRates(self):
class TestNegativeVolume(unittest.TestCase):
def test_negativeVolume(self):
- """Build a block with WAY too many fuel pins and show that the derived volume is negative."""
+ """Build a Block with WAY too many fuel pins and show that the derived volume is negative."""
block = blocks.HexBlock("TestHexBlock")
coldTemp = 20
@@ -1916,8 +1918,7 @@ def test_getNumPins(self):
def test_block_dims(self):
"""
- Tests that the block class can provide basic dimensionality information about
- itself.
+ Tests that the block class can provide basic dimensionality information about itself.
.. test:: Important block dimensions are retrievable.
:id: T_ARMI_BLOCK_DIMS
@@ -2003,8 +2004,8 @@ def test_getPitchHomogeneousBlock(self):
Notes
-----
- This assumes there are 3 materials in the homogeneous block, one with half
- the area fraction, and 2 with 1/4 each.
+ This assumes there are 3 materials in the homogeneous block, one with half the area
+ fraction, and 2 with 1/4 each.
"""
desiredPitch = 14.0
hexTotalArea = hexagon.area(desiredPitch)
@@ -2013,18 +2014,17 @@ def test_getPitchHomogeneousBlock(self):
areaFractions = [0.5, 0.25, 0.25]
materials = ["HT9", "UZr", "Sodium"]
- # There are 2 ways to do this, the first is to pick a component to be the pitch
- # defining component, and given it the shape of a hexagon to define the pitch
- # The hexagon outer pitch (op) is defined by the pitch of the block/assembly.
- # the ip is defined by whatever thickness is necessary to have the desired area
- # fraction. The second way is shown in the second half of this test.
+ # There are 2 ways to do this, the first is to pick a component to be the pitch defining
+ # component, and give it the shape of a hexagon to define the pitch. The hexagon outer
+ # pitch (op) is defined by the pitch of the block/assembly. The ip is defined by whatever
+ # thickness is necessary to have the desired area fraction. The second way is shown in the
+ # second half of this test.
hexBlock = blocks.HexBlock("TestHexBlock")
hexComponentArea = areaFractions[0] * hexTotalArea
- # Picking 1st material to use for the hex component here, but really the choice
- # is arbitrary.
- # area grows quadratically with op
+ # Picking 1st material to use for the hex component here, but really the choice is
+ # arbitrary. Area grows quadratically with op.
ipNeededForCorrectArea = desiredPitch * areaFractions[0] ** 0.5
self.assertEqual(
hexComponentArea, hexTotalArea - hexagon.area(ipNeededForCorrectArea)
@@ -2049,10 +2049,10 @@ def test_getPitchHomogeneousBlock(self):
self.assertAlmostEqual(hexTotalArea, hexBlock.getMaxArea())
self.assertAlmostEqual(sum(c.getArea() for c in hexBlock), hexTotalArea)
- # For this second way, we will simply define the 3 components as unshaped, with
- # the desired area fractions, and make a 4th component that is an infinitely
- # thin hexagon with the the desired pitch. The downside of this method is that
- # now the block has a fourth component with no volume.
+ # For this second way, we will simply define the 3 components as unshaped, with the desired
+ # area fractions, and make a 4th component that is an infinitely thin hexagon with the
+ # desired pitch. The downside of this method is that now the block has a fourth component
+ # with no volume.
hexBlock = blocks.HexBlock("TestHexBlock")
for aFrac, material in zip(areaFractions, materials):
unshapedArgs = {"area": hexTotalArea * aFrac}
@@ -2085,8 +2085,7 @@ def test_getPinCenterFlatToFlat(self):
self.assertAlmostEqual(pinCenterFlatToFlat, f2f)
def test_gridCreation(self):
- """Create a grid for a block, and show that it can handle components with
- multiplicity > 1.
+ """Create a grid for a block, and show that it can handle components with multiplicity > 1.
.. test:: Grids can handle components with multiplicity > 1.
:id: T_ARMI_GRID_MULT
@@ -2264,9 +2263,8 @@ def test_axial(self):
def test_verifyBlockDims(self):
"""
- This function is currently null. It consists of a single line that
- returns nothing. This test covers that line. If the function is ever
- implemented, it can be tested here.
+ This function is currently null. It consists of a single line that returns nothing. This
+ test covers that line. If the function is ever implemented, it can be tested here.
"""
self.ThRZBlock.verifyBlockDims()
@@ -2319,8 +2317,8 @@ def test_getPitchHomogeneousBlock(self):
Notes
-----
- This assumes there are 3 materials in the homogeneous block, one with half
- the area fraction, and 2 with 1/4 each.
+ This assumes there are 3 materials in the homogeneous block, one with half the area
+ fraction, and 2 with 1/4 each.
"""
desiredPitch = (10.0, 12.0)
rectTotalArea = desiredPitch[0] * desiredPitch[1]
@@ -2329,24 +2327,21 @@ def test_getPitchHomogeneousBlock(self):
areaFractions = [0.5, 0.25, 0.25]
materials = ["HT9", "UZr", "Sodium"]
- # There are 2 ways to do this, the first is to pick a component to be the pitch
- # defining component, and given it the shape of a rectangle to define the pitch
- # The rectangle outer dimensions is defined by the pitch of the block/assembly.
- # the inner dimensions is defined by whatever thickness is necessary to have
- # the desired area fraction.
- # The second way is to define all physical material components as unshaped, and
- # add an additional infinitely thin Void component (no area) that defines pitch.
- # See second part of HexBlock_TestCase.test_getPitchHomogeneousBlock for
- # demonstration.
+ # There are 2 ways to do this, the first is to pick a component to be the pitch defining
+ # component, and give it the shape of a rectangle to define the pitch. The rectangle outer
+ # dimensions are defined by the pitch of the block/assembly. The inner dimensions are defined
+ # by whatever thickness is necessary to have the desired area fraction. The second way is to
+ # define all physical material components as unshaped, and add an additional infinitely thin
+ # Void component (no area) that defines pitch. See second part of
+ # HexBlock_TestCase.test_getPitchHomogeneousBlock for demonstration.
cartBlock = blocks.CartesianBlock("TestCartBlock")
hexComponentArea = areaFractions[0] * rectTotalArea
- # Picking 1st material to use for the hex component here, but really the choice
- # is arbitrary.
+ # Picking 1st material to use for the hex component here, but really the choice is
+ # arbitrary.
# area grows quadratically with outer dimensions.
- # Note there are infinitely many inner dims that would preserve area,
- # this is just one of them.
+ # Note there are infinitely many inner dims that would preserve area, this is just one.
innerDims = [dim * areaFractions[0] ** 0.5 for dim in desiredPitch]
self.assertAlmostEqual(
hexComponentArea, rectTotalArea - innerDims[0] * innerDims[1]
@@ -2393,28 +2388,24 @@ def test_getHydraulicDiameter(self):
class MassConservationTests(unittest.TestCase):
- r"""Tests designed to verify mass conservation during thermal expansion."""
+ """Tests designed to verify mass conservation during thermal expansion."""
def setUp(self):
self.b = buildSimpleFuelBlock()
def test_heightExpansionDifferences(self):
- r"""The point of this test is to determine if the number densities stay the same
- with two different heights of the same block. Since we want to expand a block
- from cold temperatures to hot using the fuel expansion coefficient (most important neutronicall),
- other components are not grown correctly. This means that on the block level, axial expansion will
- NOT conserve mass of non-fuel components. However, the excess mass is simply added to the top of
+ """The point of this test is to determine if the number densities stay the same with two
+ different heights of the same block. Since we want to expand a block from cold temperatures
+ to hot using the fuel expansion coefficient (most important neutronically), other components
+ are not grown correctly. This means that on the block level, axial expansion will NOT
+ conserve mass of non-fuel components. However, the excess mass is simply added to the top of
the reactor in the plenum regions (or any non fueled region).
"""
- # assume the default block height is 'cold' height. Now we must determine
- # what the hot height should be based on thermal expansion. Change the height
- # of the block based on the different thermal expansions of the components then
- # see the effect on the number densities.
-
+ # Assume the default block height is 'cold' height. Now we must determine what the hot
+ # height should be based on thermal expansion. Change the height of the block based on the
+ # different thermal expansions of the components then see the effect on number densities.
fuel = self.b.getComponent(Flags.FUEL)
-
height = self.b.getHeight()
-
Thot = fuel.temperatureInC
Tcold = fuel.inputTemperatureInC
@@ -2444,16 +2435,18 @@ def test_heightExpansionDifferences(self):
hotFuelU238,
hotCladU238,
10,
- "Number Density of fuel in one height ({0}) != number density of fuel at another height {1}. Number density conservation "
- "violated during thermal expansion".format(hotFuelU238, hotCladU238),
+ "Number Density of fuel in one height ({0}) != number density of fuel at another "
+ "height {1}. Number density conservation violated during thermal "
+ "expansion".format(hotFuelU238, hotCladU238),
)
self.assertAlmostEqual(
hotFuelIRON,
hotCladIRON,
10,
- "Number Density of clad in one height ({0}) != number density of clad at another height {1}. Number density conservation "
- "violated during thermal expansion".format(hotFuelIRON, hotCladIRON),
+ "Number Density of clad in one height ({0}) != number density of clad at another "
+ "height {1}. Number density conservation violated during thermal "
+ "expansion".format(hotFuelIRON, hotCladIRON),
)
def test_massFuelHeatup(self):
@@ -2466,8 +2459,8 @@ def test_massFuelHeatup(self):
massCold,
massHot,
10,
- "Cold mass of fuel ({0}) != hot mass {1}. Mass conservation "
- "violated during thermal expansion".format(massCold, massHot),
+ "Cold mass of fuel ({0}) != hot mass {1}. Mass conservation violated during thermal "
+ "expansion".format(massCold, massHot),
)
def test_massCladHeatup(self):
@@ -2480,8 +2473,8 @@ def test_massCladHeatup(self):
massCold,
massHot,
10,
- "Cold mass of clad ({0}) != hot mass {1}. Mass conservation "
- "violated during thermal expansion".format(massCold, massHot),
+ "Cold mass of clad ({0}) != hot mass {1}. Mass conservation violated during thermal "
+ "expansion".format(massCold, massHot),
)
def test_massDuctHeatup(self):
@@ -2508,10 +2501,8 @@ def test_massCoolHeatup(self):
self.assertGreater(
massCold,
massHot,
- "Cold mass of coolant ({0}) <= hot mass {1}. Mass conservation "
- "not violated during thermal expansion of coolant".format(
- massCold, massHot
- ),
+ "Cold mass of coolant ({0}) <= hot mass {1}. Mass conservation not violated during "
+ "thermal expansion of coolant".format(massCold, massHot),
)
def test_dimensionDuctHeatup(self):
@@ -2525,8 +2516,8 @@ def test_dimensionDuctHeatup(self):
correctHot,
pitchHot,
10,
- "Theoretical pitch of duct ({0}) != hot pitch {1}. Linear expansion "
- "violated during heatup. \nTc={tc} Tref={tref} dLL={dLL} cold={pcold}".format(
+ "Theoretical pitch of duct ({0}) != hot pitch {1}. Linear expansion violated during "
+ "heatup. \nTc={tc} Tref={tref} dLL={dLL} cold={pcold}".format(
correctHot,
pitchHot,
tc=duct.temperatureInC,
@@ -2540,8 +2531,8 @@ def test_coldMass(self):
"""
Verify that the cold mass is what it should be, even though the hot height is input.
- At the cold temperature (but with hot height), the mass should be the same as at hot temperature
- and hot height.
+ At the cold temperature (but with hot height), the mass should be the same as at hot
+ temperature and hot height.
"""
fuel = self.b.getComponent(Flags.FUEL)
# set ref (input/cold) temperature.
@@ -2570,7 +2561,7 @@ def test_coldMass(self):
)
def test_massConsistency(self):
- r"""Verify that the sum of the component masses equals the total mass."""
+ """Verify that the sum of the component masses equals the total mass."""
tMass = 0.0
for child in self.b:
tMass += child.getMass()
diff --git a/armi/reactor/tests/test_components.py b/armi/reactor/tests/test_components.py
index fdd031f43e..4b57f422df 100644
--- a/armi/reactor/tests/test_components.py
+++ b/armi/reactor/tests/test_components.py
@@ -578,19 +578,17 @@ def test_getArea(self):
def test_componentInteractionsLinkingByDimensions(self):
"""Tests linking of Components by dimensions.
- .. test:: Show the dimensions of a liquid Component can be defined to depend on the solid Components that bound it.
+ The component ``gap``, representing the fuel-clad gap filled with Void, is defined with
+ dimensions that depend on the fuel outer diameter and clad inner diameter. The
+ :py:meth:`~armi.reactor.components.component.Component.resolveLinkedDims` method links the
+ gap dimensions appropriately when the Component is constructed, and the test shows the area
+ of the gap is calculated correctly based on the thermally-expanded dimensions of the fuel
+ and clad Components.
+
+ .. test:: Show the dimensions of a liquid Component can be defined to depend on the solid
+ Components that bound it.
:id: T_ARMI_COMP_FLUID1
:tests: R_ARMI_COMP_FLUID
-
- The component ``gap``, representing the fuel-clad gap filled with Void,
- is defined with dimensions that depend on the fuel outer diameter and
- clad inner diameter. The
- :py:meth:`~armi.reactor.components.component.Component.resolveLinkedDims`
- method links the gap dimensions appropriately when the Component is
- constructed, and the test shows the area of the gap is calculated
- correctly based on the thermally-expanded dimensions of the fuel and
- clad Components.
-
"""
nPins = 217
fuelDims = {"Tinput": 25.0, "Thot": 430.0, "od": 0.9, "id": 0.0, "mult": nPins}
@@ -770,23 +768,23 @@ def expansionConservationHotHeightDefined(self, mat: str, isotope: str):
circle.material.density(Tc=circle.temperatureInC),
)
- # brief 2D expansion with set temp to show mass is conserved
- # hot height would come from block value
+ # brief 2D expansion with set temp to show mass is conserved; hot height would come from
+ # block value
warmMass = circle1.density() * circle1.getArea() * hotHeight
circle1.setTemperature(self.tHot)
hotMass = circle1.density() * circle1.getArea() * hotHeight
self.assertAlmostEqual(warmMass, hotMass)
circle1.setTemperature(self.tWarm)
- # Change temp to circle 2 temp to show equal to circle2
- # and then change back to show recoverable to original values
+ # Change temp to circle 2 temp to show equal to circle2 and then change back to show
+ # recoverable to original values
oldArea = circle1.getArea()
initialDens = circle1.density()
# when block.setHeight is called (which effectively changes component height)
- # component.setNumberDensity is called (for solid isotopes) to adjust the number
- # density so that now the 2D expansion will be approximated/expanded around
- # the hot temp which is akin to these adjustments
+ # component.setNumberDensity is called (for solid isotopes) to adjust the number density so
+ # that now the 2D expansion will be approximated/expanded around the hot temp which is akin
+ # to these adjustments
heightFactor = circle1.getHeightFactor(self.tHot)
circle1.adjustDensityForHeightExpansion(self.tHot) # apply temp at new height
circle1.setTemperature(self.tHot)
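A toy calculation, independent of ARMI, illustrating the conservation that the warm/hot comparison above verifies: under 2D (radial) expansion at fixed height, the density decrease exactly offsets the area growth.

    coldArea, coldDensity, height = 1.0, 10.0, 5.0
    growth = 1.02  # 2% growth in each linear dimension
    hotArea = coldArea * growth**2
    hotDensity = coldDensity / growth**2
    assert abs(coldArea * coldDensity * height - hotArea * hotDensity * height) < 1e-9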
diff --git a/armi/reactor/tests/test_composites.py b/armi/reactor/tests/test_composites.py
index bd5c65d6b7..203e71ab54 100644
--- a/armi/reactor/tests/test_composites.py
+++ b/armi/reactor/tests/test_composites.py
@@ -439,7 +439,10 @@ def test_constituentReport(self):
runLog.info(self.r.core.getFirstBlock().getComponents()[0].constituentReport())
def test_getNuclides(self):
- """The getNuclides should return all keys that have ever been in this block, including values that are at trace."""
+ """
+ The getNuclides method should return all keys that have ever been in this block, including
+ values that are at trace.
+ """
cur = self.Block.getNuclides()
ref = self.refDict.keys()
for key in ref:
@@ -448,9 +451,8 @@ def test_getNuclides(self):
def test_getFuelMass(self):
"""
- This test creates a dummy assembly and ensures that the assembly, block, and fuel component masses are
- consistent.
- `getFuelMass` ensures that the fuel component is used to `getMass`.
+ This test creates a dummy assembly and ensures that the assembly, block, and fuel component
+ masses are consistent. `getFuelMass` ensures that the fuel component is used to `getMass`.
"""
cs = settings.Settings()
assemDesign = assemblyBlueprint.AssemblyBlueprint.load(self.blueprintYaml)
diff --git a/armi/utils/codeTiming.py b/armi/utils/codeTiming.py
index b205bbe11e..c01b214358 100644
--- a/armi/utils/codeTiming.py
+++ b/armi/utils/codeTiming.py
@@ -182,7 +182,7 @@ def getActiveTimers():
@staticmethod
def report(inclusion_cutoff=0.1, total_time=False):
- r"""
+ """
Write a string report of the timers.
Parameters
@@ -190,7 +190,8 @@ def report(inclusion_cutoff=0.1, total_time=False):
inclusion_cutoff : float, optional
Will not show results that have less than this fraction of the total time.
total_time : bool, optional
- Use either the ratio of total time or time since last report for consideration against the cutoff
+ Use either the ratio of total time or time since last report for consideration against
+ the cutoff
See Also
--------
@@ -232,7 +233,8 @@ def timeline(base_file_name, inclusion_cutoff=0.1, total_time=False):
inclusion_cutoff : float, optional
Will not show results that have less than this fraction of the total time.
total_time : bool, optional
- Use either the ratio of total time or time since last report for consideration against the cutoff
+ Use either the ratio of total time or time since last report for consideration against
+ the cutoff
"""
import matplotlib.pyplot as plt
import numpy as np
@@ -308,12 +310,13 @@ def flatMerge(
class _Timer:
- r"""Code timer to call at various points to measure performance.
+ """Code timer to call at various points to measure performance.
see MasterTimer.getTimer() for construction
"""
- _frozen = False # if the master timer stops, all timers must freeze, with no thaw (how would that make sense in a run?)
+ # If the master timer stops, all timers must freeze with no thaw.
+ _frozen = False
def __init__(self, name, start):
self.name = name
@@ -374,7 +377,7 @@ def timeSinceReport(self):
@property
def times(self):
- """List of time start and stop pairs, if active the current time is used as the last stop."""
+ """List of time start/stop pairs; if active, the current time is used as the last stop."""
if self.isActive:
times = copy.deepcopy(self._times)
times[-1] = (self._times[-1][0], MasterTimer.time())
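A hedged sketch using only the names visible in this diff (MasterTimer.getTimer, report, and the isActive/times members); how timers are started and stopped is not shown here, so treat this as illustrative rather than a recipe.

    from armi.utils.codeTiming import MasterTimer

    timer = MasterTimer.getTimer("flux solve")  # construction goes through the master timer
    # ... timed work would happen while the timer is active ...
    if timer.isActive:
        start, stop = timer.times[-1]  # the open interval uses the current time as its stop
    MasterTimer.report(inclusion_cutoff=0.1, total_time=True)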
diff --git a/armi/utils/customExceptions.py b/armi/utils/customExceptions.py
index 6d3fe9550c..229b31ffad 100644
--- a/armi/utils/customExceptions.py
+++ b/armi/utils/customExceptions.py
@@ -12,7 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Globally accessible exception definitions for better granularity on exception behavior and exception handling behavior."""
+"""
+Globally accessible exception definitions for better granularity on exception behavior and
+exception handling behavior.
+"""
from armi import runLog
from inspect import stack, getframeinfo
@@ -60,7 +63,7 @@ def decorated(*args, **kwargs):
def warn_when_root(func):
- r"""Decorates a method to produce a warning message only on the root node."""
+ """Decorates a method to produce a warning message only on the root node."""
return _message_when_root(warn(func))
@@ -99,15 +102,18 @@ def __init__(self, msg):
class InvalidSettingsStopProcess(SettingException):
- """Exception raised when setting file contains invalid settings and user aborts or process is uninteractive."""
+ """
+ Exception raised when the settings file contains invalid settings and the user aborts or the
+ process is uninteractive.
+ """
def __init__(self, reader):
msg = "Input settings file {}".format(reader.inputPath)
if reader.liveVersion != reader.inputVersion:
msg += (
'\n\twas made with version "{0}" which differs from the current version "{1}." '
- 'Either create the input file with the "{1}", or switch to a development version of ARMI.'
- "".format(reader.inputVersion, reader.liveVersion)
+ 'Either create the input file with the "{1}", or switch to a development version '
+ "of ARMI.".format(reader.inputVersion, reader.liveVersion)
)
if reader.invalidSettings:
msg += (
diff --git a/armi/utils/plotting.py b/armi/utils/plotting.py
index cdfad1219f..6837fff559 100644
--- a/armi/utils/plotting.py
+++ b/armi/utils/plotting.py
@@ -1195,9 +1195,8 @@ def _makeBlockPinPatches(block, cold):
)
else:
raise TypeError(
- "Shape of the pitch-defining element is not a Square or Hex it is {}, cannot plot for this type of block".format(
- comp.shape
- )
+ "Shape of the pitch-defining element is not a Square or Hex, it is "
+ f"{comp.shape}; cannot plot for this type of block."
)
patches.append(derivedPatch)
data.append(material)
diff --git a/doc/release/0.3.rst b/doc/release/0.3.rst
index e27d98c6bc..a07335bf2c 100644
--- a/doc/release/0.3.rst
+++ b/doc/release/0.3.rst
@@ -14,6 +14,11 @@ Bug Fixes
---------
#. TBD
+Quality Work
+------------
+#. We now enforce a maximum line length of 120 characters, using ``ruff``. (`PR#1646 `_)
+#. TBD
+
Changes that Affect Requirements
--------------------------------
#. TBD
@@ -21,7 +26,7 @@ Changes that Affect Requirements
ARMI v0.3.0
===========
-Release Date: 2024-01-26
+Release Date: 2024-02-02
What's new in ARMI?
-------------------
diff --git a/pyproject.toml b/pyproject.toml
index bc15238c59..e804264bca 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -135,8 +135,8 @@ required-version = "0.0.272"
# Assume Python 3.9
target-version = "py39"
-# Setting line-length to 140 (though blacks default is 88)
-line-length = 140
+# Setting line-length to 120 (though black's default is 88)
+line-length = 120
# Enable pycodestyle (E) and Pyflakes (F) codes by default.
# D - NumPy docstring rules