diff --git a/lib/iris/experimental/ugrid/__init__.py b/lib/iris/experimental/ugrid/__init__.py index bfc570fcfd..fb607ffd6b 100644 --- a/lib/iris/experimental/ugrid/__init__.py +++ b/lib/iris/experimental/ugrid/__init__.py @@ -39,8 +39,9 @@ from ...config import get_logger from ...coords import AuxCoord, _DimensionalMetadata from ...exceptions import ConnectivityNotFoundError, CoordinateNotFoundError -from ...fileformats import cf, netcdf +from ...fileformats import cf from ...fileformats._nc_load_rules.helpers import get_attr_units, get_names +from ...fileformats.netcdf import loader as nc_loader from ...io import decode_uri, expand_filespecs from ...util import guess_coord_axis @@ -3359,7 +3360,7 @@ def load_meshes(uris, var_name=None): from iris.fileformats import FORMAT_AGENT if not PARSE_UGRID_ON_LOAD: - # Explicit behaviour, consistent with netcdf.load_cubes(), rather than + # Explicit behaviour, consistent with netcdf.loader.load_cubes(), rather than # an invisible assumption. message = ( f"PARSE_UGRID_ON_LOAD is {bool(PARSE_UGRID_ON_LOAD)}. Must be " @@ -3395,7 +3396,7 @@ def load_meshes(uris, var_name=None): else: handling_format_spec = FORMAT_AGENT.get_spec(source, None) - if handling_format_spec.handler == netcdf.load_cubes: + if handling_format_spec.handler == nc_loader.load_cubes: valid_sources.append(source) else: message = f"Ignoring non-NetCDF file: {source}" @@ -3718,7 +3719,7 @@ class CFUGridReader(cf.CFReader): ############ # Object construction. -# Helper functions, supporting netcdf.load_cubes ONLY, expected to +# Helper functions, supporting netcdf.loader.load_cubes ONLY, expected to # altered/moved when pyke is removed. @@ -3733,7 +3734,7 @@ def _build_aux_coord(coord_var, file_path): assert isinstance(coord_var, CFUGridAuxiliaryCoordinateVariable) attributes = {} attr_units = get_attr_units(coord_var, attributes) - points_data = netcdf._get_cf_var_data(coord_var, file_path) + points_data = nc_loader._get_cf_var_data(coord_var, file_path) # Bounds will not be loaded: # Bounds may be present, but the UGRID conventions state this would @@ -3785,7 +3786,7 @@ def _build_connectivity(connectivity_var, file_path, location_dims): assert isinstance(connectivity_var, CFUGridConnectivityVariable) attributes = {} attr_units = get_attr_units(connectivity_var, attributes) - indices_data = netcdf._get_cf_var_data(connectivity_var, file_path) + indices_data = nc_loader._get_cf_var_data(connectivity_var, file_path) cf_role = connectivity_var.cf_role start_index = connectivity_var.start_index @@ -3952,7 +3953,7 @@ def _build_mesh(cf, mesh_var, file_path): ) mesh_elements = filter(None, mesh_elements) for iris_object in mesh_elements: - netcdf._add_unused_attributes( + nc_loader._add_unused_attributes( iris_object, cf.cf_group[iris_object.var_name] ) diff --git a/lib/iris/fileformats/__init__.py b/lib/iris/fileformats/__init__.py index 5e03f1e4fd..9ecd8ecd09 100644 --- a/lib/iris/fileformats/__init__.py +++ b/lib/iris/fileformats/__init__.py @@ -90,7 +90,11 @@ def _load_grib(*args, **kwargs): # FORMAT_AGENT.add_spec( FormatSpecification( - "NetCDF", MagicNumber(4), 0x43444601, netcdf.load_cubes, priority=5 + "NetCDF", + MagicNumber(4), + 0x43444601, + netcdf.loader.load_cubes, + priority=5, ) ) @@ -100,7 +104,7 @@ def _load_grib(*args, **kwargs): "NetCDF 64 bit offset format", MagicNumber(4), 0x43444602, - netcdf.load_cubes, + netcdf.loader.load_cubes, priority=5, ) ) @@ -112,7 +116,7 @@ def _load_grib(*args, **kwargs): "NetCDF_v4", MagicNumber(8), 0x894844460D0A1A0A, - 
netcdf.load_cubes, + netcdf.loader.load_cubes, priority=5, ) ) @@ -122,7 +126,7 @@ def _load_grib(*args, **kwargs): "NetCDF OPeNDAP", UriProtocol(), lambda protocol: protocol in ["http", "https"], - netcdf.load_cubes, + netcdf.loader.load_cubes, priority=6, ) FORMAT_AGENT.add_spec(_nc_dap) diff --git a/lib/iris/fileformats/_nc_load_rules/engine.py b/lib/iris/fileformats/_nc_load_rules/engine.py index 497c2a12c9..ff35c7df4f 100644 --- a/lib/iris/fileformats/_nc_load_rules/engine.py +++ b/lib/iris/fileformats/_nc_load_rules/engine.py @@ -5,7 +5,7 @@ # licensing details. """ A simple mimic of the Pyke 'knowledge_engine', for interfacing to the routines -in 'iris.fileformats.netcdf' with minimal changes to that code. +in 'iris.fileformats.netcdf.loader' with minimal changes to that code. This allows us to replace the Pyke rules operation with the simpler pure-Python translation operations in :mod:`iris.fileformats._nc_load_rules.actions`. @@ -15,7 +15,7 @@ engine.get_kb() also returns a FactEntity object, which mimics *just enough* API of a Pyke.knowlege_base, so that we can list its case-specific facts, as -used in :meth:`iris.fileformats.netcdf._actions_activation_stats`. +used in :meth:`iris.fileformats.netcdf.loader._actions_activation_stats`. """ from .actions import run_actions @@ -66,7 +66,7 @@ class Engine: A minimal mimic of a Pyke.engine. Provides just enough API so that the existing code in - :mod:`iris.fileformats.netcdf` can interface with our new rules functions. + :mod:`iris.fileformats.netcdf.loader` can interface with our new rules functions. A list of possible fact-arglists is stored, for each of a set of fact-names (which are strings). @@ -91,7 +91,7 @@ def activate(self): set by engine.cf_var (a CFDataVariable). The rules operation itself is coded elsewhere, - in :mod:`iris.fileformats.netcdf._nc_load_rules.actions`. + in :mod:`iris.fileformats._nc_load_rules.actions`. """ run_actions(self) @@ -101,7 +101,7 @@ def get_kb(self): """ Get a FactEntity, which mimic (bits of) a knowledge-base. Just allowing - :meth:`iris.fileformats.netcdf._action_activation_stats` to list the + :meth:`iris.fileformats.netcdf.loader._actions_activation_stats` to list the facts. """ @@ -110,7 +110,7 @@ def print_stats(self): """ No-op, called by - :meth:`iris.fileformats.netcdf._action_activation_stats`. + :meth:`iris.fileformats.netcdf.loader._actions_activation_stats`. """ pass diff --git a/lib/iris/fileformats/_nc_load_rules/helpers.py b/lib/iris/fileformats/_nc_load_rules/helpers.py index a5b507d583..e7dcb97011 100644 --- a/lib/iris/fileformats/_nc_load_rules/helpers.py +++ b/lib/iris/fileformats/_nc_load_rules/helpers.py @@ -26,8 +26,7 @@ import iris.coords import iris.exceptions import iris.fileformats.cf as cf -import iris.fileformats.netcdf -from iris.fileformats.netcdf import ( +from iris.fileformats.netcdf.loader import ( UnknownCellMethodWarning, _get_cf_var_data, parse_cell_methods, diff --git a/lib/iris/fileformats/netcdf/__init__.py b/lib/iris/fileformats/netcdf/__init__.py new file mode 100644 index 0000000000..d0074920f1 --- /dev/null +++ b/lib/iris/fileformats/netcdf/__init__.py @@ -0,0 +1,28 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the LGPL license. +# See COPYING and COPYING.LESSER in the root of the repository for full +# licensing details. +""" +A package for loading and saving cubes to and from netcdf files.
+ +""" +from .loader import ( + NetCDFDataProxy, + OrderedAddableList, + UnknownCellMethodWarning, + load_cubes, + parse_cell_methods, +) +from .saver import CFNameCoordMap, Saver, save + +__all__ = [ + "CFNameCoordMap", + "NetCDFDataProxy", + "OrderedAddableList", + "Saver", + "UnknownCellMethodWarning", + "load_cubes", + "parse_cell_methods", + "save", +] diff --git a/lib/iris/fileformats/netcdf/loader.py b/lib/iris/fileformats/netcdf/loader.py new file mode 100644 index 0000000000..aef160ab6e --- /dev/null +++ b/lib/iris/fileformats/netcdf/loader.py @@ -0,0 +1,707 @@ +# Copyright Iris contributors +# +# This file is part of Iris and is released under the LGPL license. +# See COPYING and COPYING.LESSER in the root of the repository for full +# licensing details. +""" +Module to support the loading of a NetCDF file into an Iris cube. + +See also: `netCDF4 python `_. + +Also refer to document 'NetCDF Climate and Forecast (CF) Metadata Conventions'. + +""" + +import re +import warnings + +import netCDF4 +import numpy as np + +from iris._lazy_data import as_lazy_data +from iris.aux_factory import ( + HybridHeightFactory, + HybridPressureFactory, + OceanSFactory, + OceanSg1Factory, + OceanSg2Factory, + OceanSigmaFactory, + OceanSigmaZFactory, +) +import iris.config +import iris.coord_systems +import iris.coords +import iris.exceptions +import iris.fileformats.cf +import iris.io +import iris.util + +# Show actions activation statistics. +DEBUG = False + +# Configure the logger. +logger = iris.config.get_logger(__name__) + +# Pass through CF attributes: +# - comment +# - Conventions +# - flag_masks +# - flag_meanings +# - flag_values +# - history +# - institution +# - reference +# - source +# - title +# - positive +# +_CF_ATTRS = [ + "add_offset", + "ancillary_variables", + "axis", + "bounds", + "calendar", + "cell_measures", + "cell_methods", + "climatology", + "compress", + "coordinates", + "_FillValue", + "formula_terms", + "grid_mapping", + "leap_month", + "leap_year", + "long_name", + "missing_value", + "month_lengths", + "scale_factor", + "standard_error_multiplier", + "standard_name", + "units", +] + +# Cell methods. +_CM_KNOWN_METHODS = [ + "point", + "sum", + "mean", + "maximum", + "minimum", + "mid_range", + "standard_deviation", + "variance", + "mode", + "median", +] + +_CM_COMMENT = "comment" +_CM_EXTRA = "extra" +_CM_INTERVAL = "interval" +_CM_METHOD = "method" +_CM_NAME = "name" +_CM_PARSE = re.compile( + r""" + (?P<name>([\w_]+\s*?:\s+)+) + (?P<method>[\w_\s]+(?![\w_]*\s*?:))\s* + (?: + \(\s* + (?P<extra>[^\)]+) + \)\s* + )? + """, + re.VERBOSE, +) + + +class UnknownCellMethodWarning(Warning): + pass + + +def parse_cell_methods(nc_cell_methods): + """ + Parse a CF cell_methods attribute string into a tuple of zero or + more CellMethod instances. + + Args: + + * nc_cell_methods (str): + The value of the cell methods attribute to be parsed. + + Returns: + + * cell_methods + An iterable of :class:`iris.coords.CellMethod`. + + Multiple coordinates, intervals and comments are supported. + If a method has a non-standard name a warning will be issued, but the + results are not affected. + + """ + + cell_methods = [] + if nc_cell_methods is not None: + for m in _CM_PARSE.finditer(nc_cell_methods): + d = m.groupdict() + method = d[_CM_METHOD] + method = method.strip() + # Check validity of method, allowing for multi-part methods + # e.g. mean over years.
+ method_words = method.split() + if method_words[0].lower() not in _CM_KNOWN_METHODS: + msg = "NetCDF variable contains unknown cell method {!r}" + warnings.warn( + msg.format("{}".format(method_words[0])), + UnknownCellMethodWarning, + ) + d[_CM_METHOD] = method + name = d[_CM_NAME] + name = name.replace(" ", "") + name = name.rstrip(":") + d[_CM_NAME] = tuple([n for n in name.split(":")]) + interval = [] + comment = [] + if d[_CM_EXTRA] is not None: + # + # tokenise the key words and field colon marker + # + d[_CM_EXTRA] = d[_CM_EXTRA].replace( + "comment:", "<<comment>><<:>>" + ) + d[_CM_EXTRA] = d[_CM_EXTRA].replace( + "interval:", "<<interval>><<:>>" + ) + d[_CM_EXTRA] = d[_CM_EXTRA].split("<<:>>") + if len(d[_CM_EXTRA]) == 1: + comment.extend(d[_CM_EXTRA]) + else: + next_field_type = comment + for field in d[_CM_EXTRA]: + field_type = next_field_type + index = field.rfind("<<interval>>") + if index == 0: + next_field_type = interval + continue + elif index > 0: + next_field_type = interval + else: + index = field.rfind("<<comment>>") + if index == 0: + next_field_type = comment + continue + elif index > 0: + next_field_type = comment + if index != -1: + field = field[:index] + field_type.append(field.strip()) + # + # cater for a shared interval over multiple axes + # + if len(interval): + if len(d[_CM_NAME]) != len(interval) and len(interval) == 1: + interval = interval * len(d[_CM_NAME]) + # + # cater for a shared comment over multiple axes + # + if len(comment): + if len(d[_CM_NAME]) != len(comment) and len(comment) == 1: + comment = comment * len(d[_CM_NAME]) + d[_CM_INTERVAL] = tuple(interval) + d[_CM_COMMENT] = tuple(comment) + cell_method = iris.coords.CellMethod( + d[_CM_METHOD], + coords=d[_CM_NAME], + intervals=d[_CM_INTERVAL], + comments=d[_CM_COMMENT], + ) + cell_methods.append(cell_method) + return tuple(cell_methods) + + +def _actions_engine(): + # Return an 'actions engine', which provides a pyke-rules-like interface to + # the core cf translation code. + # Deferred import to avoid circularity. + import iris.fileformats._nc_load_rules.engine as nc_actions_engine + + engine = nc_actions_engine.Engine() + return engine + + +class NetCDFDataProxy: + """A reference to the data payload of a single NetCDF file variable.""" + + __slots__ = ("shape", "dtype", "path", "variable_name", "fill_value") + + def __init__(self, shape, dtype, path, variable_name, fill_value): + self.shape = shape + self.dtype = dtype + self.path = path + self.variable_name = variable_name + self.fill_value = fill_value + + @property + def ndim(self): + return len(self.shape) + + def __getitem__(self, keys): + dataset = netCDF4.Dataset(self.path) + try: + variable = dataset.variables[self.variable_name] + # Get the NetCDF variable data and slice. + var = variable[keys] + finally: + dataset.close() + return np.asanyarray(var) + + def __repr__(self): + fmt = ( + "<{self.__class__.__name__} shape={self.shape}" + " dtype={self.dtype!r} path={self.path!r}" + " variable_name={self.variable_name!r}>" + ) + return fmt.format(self=self) + + def __getstate__(self): + return {attr: getattr(self, attr) for attr in self.__slots__} + + def __setstate__(self, state): + for key, value in state.items(): + setattr(self, key, value) + + +def _assert_case_specific_facts(engine, cf, cf_group): + # Initialise a data store for built cube elements. + # This is used to patch element attributes *not* setup by the actions + # process, after the actions code has run.
+ engine.cube_parts["coordinates"] = [] + engine.cube_parts["cell_measures"] = [] + engine.cube_parts["ancillary_variables"] = [] + + # Assert facts for CF coordinates. + for cf_name in cf_group.coordinates.keys(): + engine.add_case_specific_fact("coordinate", (cf_name,)) + + # Assert facts for CF auxiliary coordinates. + for cf_name in cf_group.auxiliary_coordinates.keys(): + engine.add_case_specific_fact("auxiliary_coordinate", (cf_name,)) + + # Assert facts for CF cell measures. + for cf_name in cf_group.cell_measures.keys(): + engine.add_case_specific_fact("cell_measure", (cf_name,)) + + # Assert facts for CF ancillary variables. + for cf_name in cf_group.ancillary_variables.keys(): + engine.add_case_specific_fact("ancillary_variable", (cf_name,)) + + # Assert facts for CF grid_mappings. + for cf_name in cf_group.grid_mappings.keys(): + engine.add_case_specific_fact("grid_mapping", (cf_name,)) + + # Assert facts for CF labels. + for cf_name in cf_group.labels.keys(): + engine.add_case_specific_fact("label", (cf_name,)) + + # Assert facts for CF formula terms associated with the cf_group + # of the CF data variable. + + # Collect varnames of formula-root variables as we go. + # NOTE: use dictionary keys as an 'OrderedSet' + # - see: https://stackoverflow.com/a/53657523/2615050 + # This is to ensure that we can handle the resulting facts in a definite + # order, as using a 'set' led to indeterminate results. + formula_root = {} + for cf_var in cf.cf_group.formula_terms.values(): + for cf_root, cf_term in cf_var.cf_terms_by_root.items(): + # Only assert this fact if the formula root variable is + # defined in the CF group of the CF data variable. + if cf_root in cf_group: + formula_root[cf_root] = True + engine.add_case_specific_fact( + "formula_term", + (cf_var.cf_name, cf_root, cf_term), + ) + + for cf_root in formula_root.keys(): + engine.add_case_specific_fact("formula_root", (cf_root,)) + + +def _actions_activation_stats(engine, cf_name): + print("-" * 80) + print("CF Data Variable: %r" % cf_name) + + engine.print_stats() + + print("Rules Triggered:") + + for rule in sorted(list(engine.rule_triggered)): + print("\t%s" % rule) + + print("Case Specific Facts:") + kb_facts = engine.get_kb() + + for key in kb_facts.entity_lists.keys(): + for arg in kb_facts.entity_lists[key].case_specific_facts: + print("\t%s%s" % (key, arg)) + + +def _set_attributes(attributes, key, value): + """Set attributes dictionary, converting unicode strings appropriately.""" + + if isinstance(value, str): + try: + attributes[str(key)] = str(value) + except UnicodeEncodeError: + attributes[str(key)] = value + else: + attributes[str(key)] = value + + +def _add_unused_attributes(iris_object, cf_var): + """ + Populate the attributes of a cf element with the "unused" attributes + from the associated CF-netCDF variable. That is, all those that aren't CF + reserved terms. + + """ + + def attribute_predicate(item): + return item[0] not in _CF_ATTRS + + tmpvar = filter(attribute_predicate, cf_var.cf_attrs_unused()) + for attr_name, attr_value in tmpvar: + _set_attributes(iris_object.attributes, attr_name, attr_value) + + +def _get_actual_dtype(cf_var): + # Figure out what the eventual data type will be after any scale/offset + # transforms. 
+ dummy_data = np.zeros(1, dtype=cf_var.dtype) + if hasattr(cf_var, "scale_factor"): + dummy_data = cf_var.scale_factor * dummy_data + if hasattr(cf_var, "add_offset"): + dummy_data = cf_var.add_offset + dummy_data + return dummy_data.dtype + + +def _get_cf_var_data(cf_var, filename): + # Get lazy chunked data out of a cf variable. + dtype = _get_actual_dtype(cf_var) + + # Create cube with deferred data, but no metadata + fill_value = getattr( + cf_var.cf_data, + "_FillValue", + netCDF4.default_fillvals[cf_var.dtype.str[1:]], + ) + proxy = NetCDFDataProxy( + cf_var.shape, dtype, filename, cf_var.cf_name, fill_value + ) + # Get the chunking specified for the variable : this is either a shape, or + # maybe the string "contiguous". + chunks = cf_var.cf_data.chunking() + # In the "contiguous" case, pass chunks=None to 'as_lazy_data'. + if chunks == "contiguous": + chunks = None + return as_lazy_data(proxy, chunks=chunks) + + +class OrderedAddableList(list): + # Used purely in actions debugging, to accumulate a record of which actions + # were activated. + # It replaces a set, so as to record the ordering of operations, with + # possible repeats, and it also numbers the entries. + # Actions routines invoke the 'add' method, which thus effectively converts + # a set.add into a list.append. + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._n_add = 0 + + def add(self, msg): + self._n_add += 1 + n_add = self._n_add + self.append(f"#{n_add:03d} : {msg}") + + +def _load_cube(engine, cf, cf_var, filename): + from iris.cube import Cube + + """Create the cube associated with the CF-netCDF data variable.""" + data = _get_cf_var_data(cf_var, filename) + cube = Cube(data) + + # Reset the actions engine. + engine.reset() + + # Initialise engine rule processing hooks. + engine.cf_var = cf_var + engine.cube = cube + engine.cube_parts = {} + engine.requires = {} + engine.rule_triggered = OrderedAddableList() + engine.filename = filename + + # Assert all the case-specific facts. + # This extracts 'facts' specific to this data-variable (aka cube), from + # the info supplied in the CFGroup object. + _assert_case_specific_facts(engine, cf, cf_var.cf_group) + + # Run the actions engine. + # This creates various cube elements and attaches them to the cube. + # It also records various other info on the engine, to be processed later. + engine.activate() + + # Having run the rules, now add the "unused" attributes to each cf element. + def fix_attributes_all_elements(role_name): + elements_and_names = engine.cube_parts.get(role_name, []) + + for iris_object, cf_var_name in elements_and_names: + _add_unused_attributes(iris_object, cf.cf_group[cf_var_name]) + + # Populate the attributes of all coordinates, cell-measures and ancillary-vars. + fix_attributes_all_elements("coordinates") + fix_attributes_all_elements("ancillary_variables") + fix_attributes_all_elements("cell_measures") + + # Also populate attributes of the top-level cube itself. + _add_unused_attributes(cube, cf_var) + + # Work out reference names for all the coords. + names = { + coord.var_name: coord.standard_name or coord.var_name or "unknown" + for coord in cube.coords() + } + + # Add all the cube cell methods. 
+ cube.cell_methods = [ + iris.coords.CellMethod( + method=method.method, + intervals=method.intervals, + comments=method.comments, + coords=[ + names[coord_name] if coord_name in names else coord_name + for coord_name in method.coord_names + ], + ) + for method in cube.cell_methods + ] + + if DEBUG: + # Show activation statistics for this data-var (i.e. cube). + _actions_activation_stats(engine, cf_var.cf_name) + + return cube + + +def _load_aux_factory(engine, cube): + """ + Convert any CF-netCDF dimensionless coordinate to an AuxCoordFactory. + + """ + formula_type = engine.requires.get("formula_type") + if formula_type in [ + "atmosphere_hybrid_height_coordinate", + "atmosphere_hybrid_sigma_pressure_coordinate", + "ocean_sigma_z_coordinate", + "ocean_sigma_coordinate", + "ocean_s_coordinate", + "ocean_s_coordinate_g1", + "ocean_s_coordinate_g2", + ]: + + def coord_from_term(term): + # Convert term names to coordinates (via netCDF variable names). + name = engine.requires["formula_terms"].get(term, None) + if name is not None: + for coord, cf_var_name in engine.cube_parts["coordinates"]: + if cf_var_name == name: + return coord + warnings.warn( + "Unable to find coordinate for variable " + "{!r}".format(name) + ) + + if formula_type == "atmosphere_hybrid_height_coordinate": + delta = coord_from_term("a") + sigma = coord_from_term("b") + orography = coord_from_term("orog") + factory = HybridHeightFactory(delta, sigma, orography) + elif formula_type == "atmosphere_hybrid_sigma_pressure_coordinate": + # Hybrid pressure has two valid versions of its formula terms: + # "p0: var1 a: var2 b: var3 ps: var4" or + # "ap: var1 b: var2 ps: var3" where "ap = p0 * a" + # Attempt to get the "ap" term. + delta = coord_from_term("ap") + if delta is None: + # The "ap" term is unavailable, so try getting terms "p0" + # and "a" terms in order to derive an "ap" equivalent term. + coord_p0 = coord_from_term("p0") + if coord_p0 is not None: + if coord_p0.shape != (1,): + msg = ( + "Expecting {!r} to be a scalar reference " + "pressure coordinate, got shape {!r}".format( + coord_p0.var_name, coord_p0.shape + ) + ) + raise ValueError(msg) + if coord_p0.has_bounds(): + msg = ( + "Ignoring atmosphere hybrid sigma pressure " + "scalar coordinate {!r} bounds.".format( + coord_p0.name() + ) + ) + warnings.warn(msg) + coord_a = coord_from_term("a") + if coord_a is not None: + if coord_a.units.is_unknown(): + # Be graceful, and promote unknown to dimensionless units. 
+ coord_a.units = "1" + delta = coord_a * coord_p0.points[0] + delta.units = coord_a.units * coord_p0.units + delta.rename("vertical pressure") + delta.var_name = "ap" + cube.add_aux_coord(delta, cube.coord_dims(coord_a)) + + sigma = coord_from_term("b") + surface_air_pressure = coord_from_term("ps") + factory = HybridPressureFactory(delta, sigma, surface_air_pressure) + elif formula_type == "ocean_sigma_z_coordinate": + sigma = coord_from_term("sigma") + eta = coord_from_term("eta") + depth = coord_from_term("depth") + depth_c = coord_from_term("depth_c") + nsigma = coord_from_term("nsigma") + zlev = coord_from_term("zlev") + factory = OceanSigmaZFactory( + sigma, eta, depth, depth_c, nsigma, zlev + ) + elif formula_type == "ocean_sigma_coordinate": + sigma = coord_from_term("sigma") + eta = coord_from_term("eta") + depth = coord_from_term("depth") + factory = OceanSigmaFactory(sigma, eta, depth) + elif formula_type == "ocean_s_coordinate": + s = coord_from_term("s") + eta = coord_from_term("eta") + depth = coord_from_term("depth") + a = coord_from_term("a") + depth_c = coord_from_term("depth_c") + b = coord_from_term("b") + factory = OceanSFactory(s, eta, depth, a, b, depth_c) + elif formula_type == "ocean_s_coordinate_g1": + s = coord_from_term("s") + c = coord_from_term("c") + eta = coord_from_term("eta") + depth = coord_from_term("depth") + depth_c = coord_from_term("depth_c") + factory = OceanSg1Factory(s, c, eta, depth, depth_c) + elif formula_type == "ocean_s_coordinate_g2": + s = coord_from_term("s") + c = coord_from_term("c") + eta = coord_from_term("eta") + depth = coord_from_term("depth") + depth_c = coord_from_term("depth_c") + factory = OceanSg2Factory(s, c, eta, depth, depth_c) + cube.add_aux_factory(factory) + + +def load_cubes(filenames, callback=None): + """ + Loads cubes from a list of NetCDF filenames/URLs. + + Args: + + * filenames (string/list): + One or more NetCDF filenames/DAP URLs to load from. + + Kwargs: + + * callback (callable function): + Function which can be passed on to :func:`iris.io.run_callback`. + + Returns: + Generator of loaded NetCDF :class:`iris.cube.Cube`. + + """ + # TODO: rationalise UGRID/mesh handling once experimental.ugrid is folded + # into standard behaviour. + # Deferred import to avoid circular imports. + from iris.experimental.ugrid import ( + PARSE_UGRID_ON_LOAD, + CFUGridReader, + _build_mesh, + _build_mesh_coords, + ) + from iris.io import run_callback + + # Create an actions engine. + engine = _actions_engine() + + if isinstance(filenames, str): + filenames = [filenames] + + for filename in filenames: + # Ingest the netCDF file. + meshes = {} + if PARSE_UGRID_ON_LOAD: + cf = CFUGridReader(filename) + + # Mesh instances are shared between file phenomena. + # TODO: more sophisticated Mesh sharing between files. + # TODO: access external Mesh cache? + mesh_vars = cf.cf_group.meshes + meshes = { + name: _build_mesh(cf, var, filename) + for name, var in mesh_vars.items() + } + else: + cf = iris.fileformats.cf.CFReader(filename) + + # Process each CF data variable. + data_variables = list(cf.cf_group.data_variables.values()) + list( + cf.cf_group.promoted.values() + ) + for cf_var in data_variables: + # cf_var-specific mesh handling, if a mesh is present. + # Build the mesh_coords *before* loading the cube - avoids + # mesh-related attributes being picked up by + # _add_unused_attributes(). 
+ mesh_name = None + mesh = None + mesh_coords, mesh_dim = [], None + if PARSE_UGRID_ON_LOAD: + mesh_name = getattr(cf_var, "mesh", None) + if mesh_name is not None: + try: + mesh = meshes[mesh_name] + except KeyError: + message = ( + f"File does not contain mesh: '{mesh_name}' - " + f"referenced by variable: '{cf_var.cf_name}' ." + ) + logger.debug(message) + if mesh is not None: + mesh_coords, mesh_dim = _build_mesh_coords(mesh, cf_var) + + cube = _load_cube(engine, cf, cf_var, filename) + + # Attach the mesh (if present) to the cube. + for mesh_coord in mesh_coords: + cube.add_aux_coord(mesh_coord, mesh_dim) + + # Process any associated formula terms and attach + # the corresponding AuxCoordFactory. + try: + _load_aux_factory(engine, cube) + except ValueError as e: + warnings.warn("{}".format(e)) + + # Perform any user registered callback function. + cube = run_callback(callback, cube, cf_var, filename) + + # Callback mechanism may return None, which must not be yielded + if cube is None: + continue + + yield cube diff --git a/lib/iris/fileformats/netcdf.py b/lib/iris/fileformats/netcdf/saver.py similarity index 77% rename from lib/iris/fileformats/netcdf.py rename to lib/iris/fileformats/netcdf/saver.py index 7bb90665b6..753d08be37 100644 --- a/lib/iris/fileformats/netcdf.py +++ b/lib/iris/fileformats/netcdf/saver.py @@ -4,7 +4,7 @@ # See COPYING and COPYING.LESSER in the root of the repository for full # licensing details. """ -Module to support the loading of a NetCDF file into an Iris cube. +Module to support the saving of Iris cubes to a NetCDF file. See also: `netCDF4 python `_. @@ -26,7 +26,6 @@ import numpy as np import numpy.ma as ma -from iris._lazy_data import as_lazy_data from iris.aux_factory import ( HybridHeightFactory, HybridPressureFactory, @@ -44,53 +43,9 @@ import iris.io import iris.util -# Show actions activation statistics. -DEBUG = False - -# Configure the logger. -logger = iris.config.get_logger(__name__) - # Standard CML spatio-temporal axis names. SPATIO_TEMPORAL_AXES = ["t", "z", "y", "x"] -# Pass through CF attributes: -# - comment -# - Conventions -# - flag_masks -# - flag_meanings -# - flag_values -# - history -# - institution -# - reference -# - source -# - title -# - positive -# -_CF_ATTRS = [ - "add_offset", - "ancillary_variables", - "axis", - "bounds", - "calendar", - "cell_measures", - "cell_methods", - "climatology", - "compress", - "coordinates", - "_FillValue", - "formula_terms", - "grid_mapping", - "leap_month", - "leap_year", - "long_name", - "missing_value", - "month_lengths", - "scale_factor", - "standard_error_multiplier", - "standard_name", - "units", -] - # CF attributes that should not be global. _CF_DATA_ATTRS = [ "flag_masks", @@ -157,141 +112,53 @@ } -# Cell methods. -_CM_KNOWN_METHODS = [ - "point", - "sum", - "mean", - "maximum", - "minimum", - "mid_range", - "standard_deviation", - "variance", - "mode", - "median", -] - -_CM_COMMENT = "comment" -_CM_EXTRA = "extra" -_CM_INTERVAL = "interval" -_CM_METHOD = "method" -_CM_NAME = "name" -_CM_PARSE = re.compile( - r""" - (?P<name>([\w_]+\s*?:\s+)+) - (?P<method>[\w_\s]+(?![\w_]*\s*?:))\s* - (?: - \(\s* - (?P<extra>[^\)]+) - \)\s* - )? - """, - re.VERBOSE, -) - - -class UnknownCellMethodWarning(Warning): - pass +def _bytes_if_ascii(string): + """ + Convert the given string to a byte string (str in py2k, bytes in py3k) + if the given string can be encoded to ascii, else maintain the type + of the inputted string.
+ Note: passing objects without an `encode` method (such as None) will + be returned by the function unchanged. -def parse_cell_methods(nc_cell_methods): """ - Parse a CF cell_methods attribute string into a tuple of zero or - more CellMethod instances. + if isinstance(string, str): + try: + return string.encode(encoding="ascii") + except (AttributeError, UnicodeEncodeError): + pass + return string - Args: - * nc_cell_methods (str): - The value of the cell methods attribute to be parsed. +def _setncattr(variable, name, attribute): + """ + Put the given attribute on the given netCDF4 Data type, casting + attributes as we go to bytes rather than unicode. - Returns: + """ + attribute = _bytes_if_ascii(attribute) + return variable.setncattr(name, attribute) - * cell_methods - An iterable of :class:`iris.coords.CellMethod`. - Multiple coordinates, intervals and comments are supported. - If a method has a non-standard name a warning will be issued, but the - results are not affected. +class _FillValueMaskCheckAndStoreTarget: + """ + To be used with da.store. Remembers whether any element was equal to a + given value and whether it was masked, before passing the chunk to the + given target. """ - cell_methods = [] - if nc_cell_methods is not None: - for m in _CM_PARSE.finditer(nc_cell_methods): - d = m.groupdict() - method = d[_CM_METHOD] - method = method.strip() - # Check validity of method, allowing for multi-part methods - # e.g. mean over years. - method_words = method.split() - if method_words[0].lower() not in _CM_KNOWN_METHODS: - msg = "NetCDF variable contains unknown cell method {!r}" - warnings.warn( - msg.format("{}".format(method_words[0])), - UnknownCellMethodWarning, - ) - d[_CM_METHOD] = method - name = d[_CM_NAME] - name = name.replace(" ", "") - name = name.rstrip(":") - d[_CM_NAME] = tuple([n for n in name.split(":")]) - interval = [] - comment = [] - if d[_CM_EXTRA] is not None: - # - # tokenise the key words and field colon marker - # - d[_CM_EXTRA] = d[_CM_EXTRA].replace( - "comment:", "<<comment>><<:>>" - ) - d[_CM_EXTRA] = d[_CM_EXTRA].replace( - "interval:", "<<interval>><<:>>" - ) - d[_CM_EXTRA] = d[_CM_EXTRA].split("<<:>>") - if len(d[_CM_EXTRA]) == 1: - comment.extend(d[_CM_EXTRA]) - else: - next_field_type = comment - for field in d[_CM_EXTRA]: - field_type = next_field_type - index = field.rfind("<<interval>>") - if index == 0: - next_field_type = interval - continue - elif index > 0: - next_field_type = interval - else: - index = field.rfind("<<comment>>") - if index == 0: - next_field_type = comment - continue - elif index > 0: - next_field_type = comment - if index != -1: - field = field[:index] - field_type.append(field.strip()) - # - # cater for a shared interval over multiple axes - # - if len(interval): - if len(d[_CM_NAME]) != len(interval) and len(interval) == 1: - interval = interval * len(d[_CM_NAME]) - # - # cater for a shared comment over multiple axes - # - if len(comment): - if len(d[_CM_NAME]) != len(comment) and len(comment) == 1: - comment = comment * len(d[_CM_NAME]) - d[_CM_INTERVAL] = tuple(interval) - d[_CM_COMMENT] = tuple(comment) - cell_method = iris.coords.CellMethod( - d[_CM_METHOD], - coords=d[_CM_NAME], - intervals=d[_CM_INTERVAL], - comments=d[_CM_COMMENT], - ) - cell_methods.append(cell_method) - return tuple(cell_methods) + def __init__(self, target, fill_value=None): + self.target = target + self.fill_value = fill_value + self.contains_value = False + self.is_masked = False + + def __setitem__(self, keys, arr): + if self.fill_value is not None: + self.contains_value = 
self.contains_value or self.fill_value in arr + self.is_masked = self.is_masked or ma.is_masked(arr) + self.target[keys] = arr class CFNameCoordMap: @@ -379,537 +246,6 @@ def coord(self, name): return result -def _actions_engine(): - # Return an 'actions engine', which provides a pyke-rules-like interface to - # the core cf translation code. - # Deferred import to avoid circularity. - import iris.fileformats._nc_load_rules.engine as nc_actions_engine - - engine = nc_actions_engine.Engine() - return engine - - -class NetCDFDataProxy: - """A reference to the data payload of a single NetCDF file variable.""" - - __slots__ = ("shape", "dtype", "path", "variable_name", "fill_value") - - def __init__(self, shape, dtype, path, variable_name, fill_value): - self.shape = shape - self.dtype = dtype - self.path = path - self.variable_name = variable_name - self.fill_value = fill_value - - @property - def ndim(self): - return len(self.shape) - - def __getitem__(self, keys): - dataset = netCDF4.Dataset(self.path) - try: - variable = dataset.variables[self.variable_name] - # Get the NetCDF variable data and slice. - var = variable[keys] - finally: - dataset.close() - return np.asanyarray(var) - - def __repr__(self): - fmt = ( - "<{self.__class__.__name__} shape={self.shape}" - " dtype={self.dtype!r} path={self.path!r}" - " variable_name={self.variable_name!r}>" - ) - return fmt.format(self=self) - - def __getstate__(self): - return {attr: getattr(self, attr) for attr in self.__slots__} - - def __setstate__(self, state): - for key, value in state.items(): - setattr(self, key, value) - - -def _assert_case_specific_facts(engine, cf, cf_group): - # Initialise a data store for built cube elements. - # This is used to patch element attributes *not* setup by the actions - # process, after the actions code has run. - engine.cube_parts["coordinates"] = [] - engine.cube_parts["cell_measures"] = [] - engine.cube_parts["ancillary_variables"] = [] - - # Assert facts for CF coordinates. - for cf_name in cf_group.coordinates.keys(): - engine.add_case_specific_fact("coordinate", (cf_name,)) - - # Assert facts for CF auxiliary coordinates. - for cf_name in cf_group.auxiliary_coordinates.keys(): - engine.add_case_specific_fact("auxiliary_coordinate", (cf_name,)) - - # Assert facts for CF cell measures. - for cf_name in cf_group.cell_measures.keys(): - engine.add_case_specific_fact("cell_measure", (cf_name,)) - - # Assert facts for CF ancillary variables. - for cf_name in cf_group.ancillary_variables.keys(): - engine.add_case_specific_fact("ancillary_variable", (cf_name,)) - - # Assert facts for CF grid_mappings. - for cf_name in cf_group.grid_mappings.keys(): - engine.add_case_specific_fact("grid_mapping", (cf_name,)) - - # Assert facts for CF labels. - for cf_name in cf_group.labels.keys(): - engine.add_case_specific_fact("label", (cf_name,)) - - # Assert facts for CF formula terms associated with the cf_group - # of the CF data variable. - - # Collect varnames of formula-root variables as we go. - # NOTE: use dictionary keys as an 'OrderedSet' - # - see: https://stackoverflow.com/a/53657523/2615050 - # This is to ensure that we can handle the resulting facts in a definite - # order, as using a 'set' led to indeterminate results. - formula_root = {} - for cf_var in cf.cf_group.formula_terms.values(): - for cf_root, cf_term in cf_var.cf_terms_by_root.items(): - # Only assert this fact if the formula root variable is - # defined in the CF group of the CF data variable. 
- if cf_root in cf_group: - formula_root[cf_root] = True - engine.add_case_specific_fact( - "formula_term", - (cf_var.cf_name, cf_root, cf_term), - ) - - for cf_root in formula_root.keys(): - engine.add_case_specific_fact("formula_root", (cf_root,)) - - -def _actions_activation_stats(engine, cf_name): - print("-" * 80) - print("CF Data Variable: %r" % cf_name) - - engine.print_stats() - - print("Rules Triggered:") - - for rule in sorted(list(engine.rule_triggered)): - print("\t%s" % rule) - - print("Case Specific Facts:") - kb_facts = engine.get_kb() - - for key in kb_facts.entity_lists.keys(): - for arg in kb_facts.entity_lists[key].case_specific_facts: - print("\t%s%s" % (key, arg)) - - -def _set_attributes(attributes, key, value): - """Set attributes dictionary, converting unicode strings appropriately.""" - - if isinstance(value, str): - try: - attributes[str(key)] = str(value) - except UnicodeEncodeError: - attributes[str(key)] = value - else: - attributes[str(key)] = value - - -def _add_unused_attributes(iris_object, cf_var): - """ - Populate the attributes of a cf element with the "unused" attributes - from the associated CF-netCDF variable. That is, all those that aren't CF - reserved terms. - - """ - - def attribute_predicate(item): - return item[0] not in _CF_ATTRS - - tmpvar = filter(attribute_predicate, cf_var.cf_attrs_unused()) - for attr_name, attr_value in tmpvar: - _set_attributes(iris_object.attributes, attr_name, attr_value) - - -def _get_actual_dtype(cf_var): - # Figure out what the eventual data type will be after any scale/offset - # transforms. - dummy_data = np.zeros(1, dtype=cf_var.dtype) - if hasattr(cf_var, "scale_factor"): - dummy_data = cf_var.scale_factor * dummy_data - if hasattr(cf_var, "add_offset"): - dummy_data = cf_var.add_offset + dummy_data - return dummy_data.dtype - - -def _get_cf_var_data(cf_var, filename): - # Get lazy chunked data out of a cf variable. - dtype = _get_actual_dtype(cf_var) - - # Create cube with deferred data, but no metadata - fill_value = getattr( - cf_var.cf_data, - "_FillValue", - netCDF4.default_fillvals[cf_var.dtype.str[1:]], - ) - proxy = NetCDFDataProxy( - cf_var.shape, dtype, filename, cf_var.cf_name, fill_value - ) - # Get the chunking specified for the variable : this is either a shape, or - # maybe the string "contiguous". - chunks = cf_var.cf_data.chunking() - # In the "contiguous" case, pass chunks=None to 'as_lazy_data'. - if chunks == "contiguous": - chunks = None - return as_lazy_data(proxy, chunks=chunks) - - -class OrderedAddableList(list): - # Used purely in actions debugging, to accumulate a record of which actions - # were activated. - # It replaces a set, so as to record the ordering of operations, with - # possible repeats, and it also numbers the entries. - # Actions routines invoke the 'add' method, which thus effectively converts - # a set.add into a list.append. - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._n_add = 0 - - def add(self, msg): - self._n_add += 1 - n_add = self._n_add - self.append(f"#{n_add:03d} : {msg}") - - -def _load_cube(engine, cf, cf_var, filename): - from iris.cube import Cube - - """Create the cube associated with the CF-netCDF data variable.""" - data = _get_cf_var_data(cf_var, filename) - cube = Cube(data) - - # Reset the actions engine. - engine.reset() - - # Initialise engine rule processing hooks. 
- engine.cf_var = cf_var - engine.cube = cube - engine.cube_parts = {} - engine.requires = {} - engine.rule_triggered = OrderedAddableList() - engine.filename = filename - - # Assert all the case-specific facts. - # This extracts 'facts' specific to this data-variable (aka cube), from - # the info supplied in the CFGroup object. - _assert_case_specific_facts(engine, cf, cf_var.cf_group) - - # Run the actions engine. - # This creates various cube elements and attaches them to the cube. - # It also records various other info on the engine, to be processed later. - engine.activate() - - # Having run the rules, now add the "unused" attributes to each cf element. - def fix_attributes_all_elements(role_name): - elements_and_names = engine.cube_parts.get(role_name, []) - - for iris_object, cf_var_name in elements_and_names: - _add_unused_attributes(iris_object, cf.cf_group[cf_var_name]) - - # Populate the attributes of all coordinates, cell-measures and ancillary-vars. - fix_attributes_all_elements("coordinates") - fix_attributes_all_elements("ancillary_variables") - fix_attributes_all_elements("cell_measures") - - # Also populate attributes of the top-level cube itself. - _add_unused_attributes(cube, cf_var) - - # Work out reference names for all the coords. - names = { - coord.var_name: coord.standard_name or coord.var_name or "unknown" - for coord in cube.coords() - } - - # Add all the cube cell methods. - cube.cell_methods = [ - iris.coords.CellMethod( - method=method.method, - intervals=method.intervals, - comments=method.comments, - coords=[ - names[coord_name] if coord_name in names else coord_name - for coord_name in method.coord_names - ], - ) - for method in cube.cell_methods - ] - - if DEBUG: - # Show activation statistics for this data-var (i.e. cube). - _actions_activation_stats(engine, cf_var.cf_name) - - return cube - - -def _load_aux_factory(engine, cube): - """ - Convert any CF-netCDF dimensionless coordinate to an AuxCoordFactory. - - """ - formula_type = engine.requires.get("formula_type") - if formula_type in [ - "atmosphere_hybrid_height_coordinate", - "atmosphere_hybrid_sigma_pressure_coordinate", - "ocean_sigma_z_coordinate", - "ocean_sigma_coordinate", - "ocean_s_coordinate", - "ocean_s_coordinate_g1", - "ocean_s_coordinate_g2", - ]: - - def coord_from_term(term): - # Convert term names to coordinates (via netCDF variable names). - name = engine.requires["formula_terms"].get(term, None) - if name is not None: - for coord, cf_var_name in engine.cube_parts["coordinates"]: - if cf_var_name == name: - return coord - warnings.warn( - "Unable to find coordinate for variable " - "{!r}".format(name) - ) - - if formula_type == "atmosphere_hybrid_height_coordinate": - delta = coord_from_term("a") - sigma = coord_from_term("b") - orography = coord_from_term("orog") - factory = HybridHeightFactory(delta, sigma, orography) - elif formula_type == "atmosphere_hybrid_sigma_pressure_coordinate": - # Hybrid pressure has two valid versions of its formula terms: - # "p0: var1 a: var2 b: var3 ps: var4" or - # "ap: var1 b: var2 ps: var3" where "ap = p0 * a" - # Attempt to get the "ap" term. - delta = coord_from_term("ap") - if delta is None: - # The "ap" term is unavailable, so try getting terms "p0" - # and "a" terms in order to derive an "ap" equivalent term. 
- coord_p0 = coord_from_term("p0") - if coord_p0 is not None: - if coord_p0.shape != (1,): - msg = ( - "Expecting {!r} to be a scalar reference " - "pressure coordinate, got shape {!r}".format( - coord_p0.var_name, coord_p0.shape - ) - ) - raise ValueError(msg) - if coord_p0.has_bounds(): - msg = ( - "Ignoring atmosphere hybrid sigma pressure " - "scalar coordinate {!r} bounds.".format( - coord_p0.name() - ) - ) - warnings.warn(msg) - coord_a = coord_from_term("a") - if coord_a is not None: - if coord_a.units.is_unknown(): - # Be graceful, and promote unknown to dimensionless units. - coord_a.units = "1" - delta = coord_a * coord_p0.points[0] - delta.units = coord_a.units * coord_p0.units - delta.rename("vertical pressure") - delta.var_name = "ap" - cube.add_aux_coord(delta, cube.coord_dims(coord_a)) - - sigma = coord_from_term("b") - surface_air_pressure = coord_from_term("ps") - factory = HybridPressureFactory(delta, sigma, surface_air_pressure) - elif formula_type == "ocean_sigma_z_coordinate": - sigma = coord_from_term("sigma") - eta = coord_from_term("eta") - depth = coord_from_term("depth") - depth_c = coord_from_term("depth_c") - nsigma = coord_from_term("nsigma") - zlev = coord_from_term("zlev") - factory = OceanSigmaZFactory( - sigma, eta, depth, depth_c, nsigma, zlev - ) - elif formula_type == "ocean_sigma_coordinate": - sigma = coord_from_term("sigma") - eta = coord_from_term("eta") - depth = coord_from_term("depth") - factory = OceanSigmaFactory(sigma, eta, depth) - elif formula_type == "ocean_s_coordinate": - s = coord_from_term("s") - eta = coord_from_term("eta") - depth = coord_from_term("depth") - a = coord_from_term("a") - depth_c = coord_from_term("depth_c") - b = coord_from_term("b") - factory = OceanSFactory(s, eta, depth, a, b, depth_c) - elif formula_type == "ocean_s_coordinate_g1": - s = coord_from_term("s") - c = coord_from_term("c") - eta = coord_from_term("eta") - depth = coord_from_term("depth") - depth_c = coord_from_term("depth_c") - factory = OceanSg1Factory(s, c, eta, depth, depth_c) - elif formula_type == "ocean_s_coordinate_g2": - s = coord_from_term("s") - c = coord_from_term("c") - eta = coord_from_term("eta") - depth = coord_from_term("depth") - depth_c = coord_from_term("depth_c") - factory = OceanSg2Factory(s, c, eta, depth, depth_c) - cube.add_aux_factory(factory) - - -def load_cubes(filenames, callback=None): - """ - Loads cubes from a list of NetCDF filenames/URLs. - - Args: - - * filenames (string/list): - One or more NetCDF filenames/DAP URLs to load from. - - Kwargs: - - * callback (callable function): - Function which can be passed on to :func:`iris.io.run_callback`. - - Returns: - Generator of loaded NetCDF :class:`iris.cube.Cube`. - - """ - # TODO: rationalise UGRID/mesh handling once experimental.ugrid is folded - # into standard behaviour. - # Deferred import to avoid circular imports. - from iris.experimental.ugrid import ( - PARSE_UGRID_ON_LOAD, - CFUGridReader, - _build_mesh_coords, - _meshes_from_cf, - ) - from iris.io import run_callback - - # Create an actions engine. - engine = _actions_engine() - - if isinstance(filenames, str): - filenames = [filenames] - - for filename in filenames: - # Ingest the netCDF file. - meshes = {} - if PARSE_UGRID_ON_LOAD: - cf = CFUGridReader(filename) - meshes = _meshes_from_cf(cf) - else: - cf = iris.fileformats.cf.CFReader(filename) - - # Process each CF data variable. 
- data_variables = list(cf.cf_group.data_variables.values()) + list( - cf.cf_group.promoted.values() - ) - for cf_var in data_variables: - # cf_var-specific mesh handling, if a mesh is present. - # Build the mesh_coords *before* loading the cube - avoids - # mesh-related attributes being picked up by - # _add_unused_attributes(). - mesh_name = None - mesh = None - mesh_coords, mesh_dim = [], None - if PARSE_UGRID_ON_LOAD: - mesh_name = getattr(cf_var, "mesh", None) - if mesh_name is not None: - try: - mesh = meshes[mesh_name] - except KeyError: - message = ( - f"File does not contain mesh: '{mesh_name}' - " - f"referenced by variable: '{cf_var.cf_name}' ." - ) - logger.debug(message) - if mesh is not None: - mesh_coords, mesh_dim = _build_mesh_coords(mesh, cf_var) - - cube = _load_cube(engine, cf, cf_var, filename) - - # Attach the mesh (if present) to the cube. - for mesh_coord in mesh_coords: - cube.add_aux_coord(mesh_coord, mesh_dim) - - # Process any associated formula terms and attach - # the corresponding AuxCoordFactory. - try: - _load_aux_factory(engine, cube) - except ValueError as e: - warnings.warn("{}".format(e)) - - # Perform any user registered callback function. - cube = run_callback(callback, cube, cf_var, filename) - - # Callback mechanism may return None, which must not be yielded - if cube is None: - continue - - yield cube - - -def _bytes_if_ascii(string): - """ - Convert the given string to a byte string (str in py2k, bytes in py3k) - if the given string can be encoded to ascii, else maintain the type - of the inputted string. - - Note: passing objects without an `encode` method (such as None) will - be returned by the function unchanged. - - """ - if isinstance(string, str): - try: - return string.encode(encoding="ascii") - except (AttributeError, UnicodeEncodeError): - pass - return string - - -def _setncattr(variable, name, attribute): - """ - Put the given attribute on the given netCDF4 Data type, casting - attributes as we go to bytes rather than unicode. - - """ - attribute = _bytes_if_ascii(attribute) - return variable.setncattr(name, attribute) - - -class _FillValueMaskCheckAndStoreTarget: - """ - To be used with da.store. Remembers whether any element was equal to a - given value and whether it was masked, before passing the chunk to the - given target. 
- - """ - - def __init__(self, target, fill_value=None): - self.target = target - self.fill_value = fill_value - self.contains_value = False - self.is_masked = False - - def __setitem__(self, keys, arr): - if self.fill_value is not None: - self.contains_value = self.contains_value or self.fill_value in arr - self.is_masked = self.is_masked or ma.is_masked(arr) - self.target[keys] = arr - - class Saver: """A manager for saving netcdf files.""" @@ -978,7 +314,7 @@ def __init__(self, filename, netcdf_format): def __enter__(self): return self - def __exit__(self, type, value, traceback): + def __exit__(self, exc_type, exc_value, traceback): """Flush any buffered data to the CF-netCDF file before closing.""" self._dataset.sync() diff --git a/lib/iris/io/__init__.py b/lib/iris/io/__init__.py index 64501afd1e..a1463c8e64 100644 --- a/lib/iris/io/__init__.py +++ b/lib/iris/io/__init__.py @@ -275,7 +275,7 @@ def _check_init_savers(): _savers.update( { "pp": pp.save, - "nc": netcdf.save, + "nc": netcdf.saver.save, "dot": _dot_save, "dotpng": _dot_save_png, "grib2": _grib_save, diff --git a/lib/iris/tests/integration/test_netcdf.py b/lib/iris/tests/integration/test_netcdf.py index 3ff5bbb19d..a63f6d1954 100644 --- a/lib/iris/tests/integration/test_netcdf.py +++ b/lib/iris/tests/integration/test_netcdf.py @@ -26,11 +26,8 @@ import iris from iris.coords import CellMethod from iris.cube import Cube, CubeList -from iris.fileformats.netcdf import ( - CF_CONVENTIONS_VERSION, - Saver, - UnknownCellMethodWarning, -) +from iris.fileformats.netcdf.loader import UnknownCellMethodWarning +from iris.fileformats.netcdf.saver import CF_CONVENTIONS_VERSION, Saver import iris.tests.stock as stock diff --git a/lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/endian.cdl b/lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/endian.cdl similarity index 100% rename from lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/endian.cdl rename to lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/endian.cdl diff --git a/lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/mercator.cdl b/lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/mercator.cdl similarity index 100% rename from lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/mercator.cdl rename to lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/mercator.cdl diff --git a/lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/mercator_no_ellipsoid.cdl b/lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/mercator_no_ellipsoid.cdl similarity index 100% rename from lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/mercator_no_ellipsoid.cdl rename to lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/mercator_no_ellipsoid.cdl diff --git a/lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/stereographic.cdl b/lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/stereographic.cdl similarity index 100% rename from lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/stereographic.cdl rename to lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/stereographic.cdl diff --git a/lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/stereographic_no_ellipsoid.cdl b/lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/stereographic_no_ellipsoid.cdl similarity index 100% rename from lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/stereographic_no_ellipsoid.cdl rename to 
lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/stereographic_no_ellipsoid.cdl
diff --git a/lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/transverse_mercator.cdl b/lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/transverse_mercator.cdl
similarity index 100%
rename from lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/transverse_mercator.cdl
rename to lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/transverse_mercator.cdl
diff --git a/lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/transverse_mercator_no_ellipsoid.cdl b/lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/transverse_mercator_no_ellipsoid.cdl
similarity index 100%
rename from lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/transverse_mercator_no_ellipsoid.cdl
rename to lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/transverse_mercator_no_ellipsoid.cdl
diff --git a/lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/with_climatology.cdl b/lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/with_climatology.cdl
similarity index 100%
rename from lib/iris/tests/results/unit/fileformats/netcdf/Saver/write/with_climatology.cdl
rename to lib/iris/tests/results/unit/fileformats/netcdf/saver/Saver/write/with_climatology.cdl
diff --git a/lib/iris/tests/test_netcdf.py b/lib/iris/tests/test_netcdf.py
index 969d987af3..8baadbb48a 100644
--- a/lib/iris/tests/test_netcdf.py
+++ b/lib/iris/tests/test_netcdf.py
@@ -28,7 +28,7 @@
 import iris.analysis.trajectory
 import iris.coord_systems as icoord_systems
 from iris.fileformats._nc_load_rules import helpers as ncload_helpers
-import iris.fileformats.netcdf
+from iris.fileformats.netcdf.saver import Saver
 import iris.std_names
 import iris.tests.stock as stock
 import iris.util
@@ -345,7 +345,7 @@ def test_noexist_directory(self):
         dir_name = os.path.join(tempfile.gettempdir(), "non_existent_dir")
         fnme = os.path.join(dir_name, "tmp.nc")
         with self.assertRaises(IOError):
-            with iris.fileformats.netcdf.Saver(fnme, "NETCDF4"):
+            with Saver(fnme, "NETCDF4"):
                 pass

     def test_bad_permissions(self):
@@ -359,7 +359,7 @@ def test_bad_permissions(self):
         try:
             os.chmod(dir_name, stat.S_IREAD)
             with self.assertRaises(PermissionError):
-                iris.fileformats.netcdf.Saver(fname, "NETCDF4")
+                Saver(fname, "NETCDF4")
             self.assertFalse(os.path.exists(fname))
         finally:
             shutil.rmtree(dir_name)
diff --git a/lib/iris/tests/unit/fileformats/nc_load_rules/actions/__init__.py b/lib/iris/tests/unit/fileformats/nc_load_rules/actions/__init__.py
index 717e5b5c41..4ec6c6af99 100644
--- a/lib/iris/tests/unit/fileformats/nc_load_rules/actions/__init__.py
+++ b/lib/iris/tests/unit/fileformats/nc_load_rules/actions/__init__.py
@@ -7,7 +7,7 @@
 Unit tests for the module :mod:`iris.fileformats._nc_load_rules.actions`.

 This module provides the engine.activate() call used in the function
-`iris.fileformats.netcdf._load_cube`.
+`iris.fileformats.netcdf.loader._load_cube`.

 """
 from pathlib import Path
@@ -17,8 +17,8 @@
 import iris.fileformats._nc_load_rules.engine
 from iris.fileformats.cf import CFReader
-import iris.fileformats.netcdf
-from iris.fileformats.netcdf import _load_cube
+import iris.fileformats.netcdf.loader
+from iris.fileformats.netcdf.loader import _load_cube

 """
 Notes on testing method.
@@ -92,11 +92,11 @@ def load_cube_from_cdl(self, cdl_string, cdl_path, nc_path):
         # Grab a data variable : FOR NOW always grab the 'phenom' variable.
         cf_var = cf.cf_group.data_variables["phenom"]

-        engine = iris.fileformats.netcdf._actions_engine()
+        engine = iris.fileformats.netcdf.loader._actions_engine()

         # If debug enabled, switch on the activation summary debug output.
         # Use 'patch' so it is restored after the test.
-        self.patch("iris.fileformats.netcdf.DEBUG", self.debug)
+        self.patch("iris.fileformats.netcdf.loader.DEBUG", self.debug)

         # Call the main translation function to load a single cube.
         # _load_cube establishes per-cube facts, activates rules and
@@ -107,7 +107,7 @@ def load_cube_from_cdl(self, cdl_string, cdl_path, nc_path):
         # by the rules operation.
         # Unlike the other translations, _load_cube does *not* convert this
         # information into actual cube elements. That is instead done by
-        # `iris.fileformats.netcdf._load_aux_factory`.
+        # `iris.fileformats.netcdf.loader._load_aux_factory`.
         # For rules testing, it is anyway more convenient to deal with the raw
         # data, as each factory type has different validity requirements to
         # build it, and none of that is relevant to the rules operation.
diff --git a/lib/iris/tests/unit/fileformats/nc_load_rules/helpers/test_build_auxiliary_coordinate.py b/lib/iris/tests/unit/fileformats/nc_load_rules/helpers/test_build_auxiliary_coordinate.py
index 95f892454b..9cb9fc5bda 100644
--- a/lib/iris/tests/unit/fileformats/nc_load_rules/helpers/test_build_auxiliary_coordinate.py
+++ b/lib/iris/tests/unit/fileformats/nc_load_rules/helpers/test_build_auxiliary_coordinate.py
@@ -77,7 +77,7 @@ def patched__getitem__(proxy_self, keys):
             raise RuntimeError()

         self.patch(
-            "iris.fileformats.netcdf.NetCDFDataProxy.__getitem__",
+            "iris.fileformats.netcdf.loader.NetCDFDataProxy.__getitem__",
             new=patched__getitem__,
         )
@@ -180,7 +180,7 @@ def patched__getitem__(proxy_self, keys):
             raise RuntimeError()

         self.deferred_load_patch = mock.patch(
-            "iris.fileformats.netcdf.NetCDFDataProxy.__getitem__",
+            "iris.fileformats.netcdf.loader.NetCDFDataProxy.__getitem__",
             new=patched__getitem__,
         )
@@ -264,7 +264,7 @@ def patched__getitem__(proxy_self, keys):
             raise RuntimeError()

         self.patch(
-            "iris.fileformats.netcdf.NetCDFDataProxy.__getitem__",
+            "iris.fileformats.netcdf.loader.NetCDFDataProxy.__getitem__",
             new=patched__getitem__,
         )
diff --git a/lib/iris/tests/unit/fileformats/netcdf/loader/__init__.py b/lib/iris/tests/unit/fileformats/netcdf/loader/__init__.py
new file mode 100644
index 0000000000..7c2ae96158
--- /dev/null
+++ b/lib/iris/tests/unit/fileformats/netcdf/loader/__init__.py
@@ -0,0 +1,6 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""Unit tests for the :mod:`iris.fileformats.netcdf.loader` module."""
diff --git a/lib/iris/tests/unit/fileformats/netcdf/test__get_cf_var_data.py b/lib/iris/tests/unit/fileformats/netcdf/loader/test__get_cf_var_data.py
similarity index 94%
rename from lib/iris/tests/unit/fileformats/netcdf/test__get_cf_var_data.py
rename to lib/iris/tests/unit/fileformats/netcdf/loader/test__get_cf_var_data.py
index 1bf39591d2..597dcd612f 100644
--- a/lib/iris/tests/unit/fileformats/netcdf/test__get_cf_var_data.py
+++ b/lib/iris/tests/unit/fileformats/netcdf/loader/test__get_cf_var_data.py
@@ -3,7 +3,7 @@
 # This file is part of Iris and is released under the LGPL license.
 # See COPYING and COPYING.LESSER in the root of the repository for full
 # licensing details.
-"""Unit tests for the `iris.fileformats.netcdf._get_cf_var_data` function."""
+"""Unit tests for the `iris.fileformats.netcdf.loader._get_cf_var_data` function."""

 # Import iris.tests first so that some things can be initialised before
 # importing anything else.
@@ -16,7 +16,7 @@
 from iris._lazy_data import _optimum_chunksize
 import iris.fileformats.cf
-from iris.fileformats.netcdf import _get_cf_var_data
+from iris.fileformats.netcdf.loader import _get_cf_var_data


 class Test__get_cf_var_data(tests.IrisTest):
diff --git a/lib/iris/tests/unit/fileformats/netcdf/test__load_aux_factory.py b/lib/iris/tests/unit/fileformats/netcdf/loader/test__load_aux_factory.py
similarity index 98%
rename from lib/iris/tests/unit/fileformats/netcdf/test__load_aux_factory.py
rename to lib/iris/tests/unit/fileformats/netcdf/loader/test__load_aux_factory.py
index eb9da6b5d6..6504556a43 100644
--- a/lib/iris/tests/unit/fileformats/netcdf/test__load_aux_factory.py
+++ b/lib/iris/tests/unit/fileformats/netcdf/loader/test__load_aux_factory.py
@@ -3,7 +3,7 @@
 # This file is part of Iris and is released under the LGPL license.
 # See COPYING and COPYING.LESSER in the root of the repository for full
 # licensing details.
-"""Unit tests for the `iris.fileformats.netcdf._load_aux_factory` function."""
+"""Unit tests for the `iris.fileformats.netcdf.loader._load_aux_factory` function."""

 # Import iris.tests first so that some things can be initialised before
 # importing anything else.
@@ -16,7 +16,7 @@
 from iris.coords import DimCoord
 from iris.cube import Cube
-from iris.fileformats.netcdf import _load_aux_factory
+from iris.fileformats.netcdf.loader import _load_aux_factory


 class TestAtmosphereHybridSigmaPressureCoordinate(tests.IrisTest):
diff --git a/lib/iris/tests/unit/fileformats/netcdf/test__load_cube.py b/lib/iris/tests/unit/fileformats/netcdf/loader/test__load_cube.py
similarity index 95%
rename from lib/iris/tests/unit/fileformats/netcdf/test__load_cube.py
rename to lib/iris/tests/unit/fileformats/netcdf/loader/test__load_cube.py
index 0e98eec916..855be3f6ea 100644
--- a/lib/iris/tests/unit/fileformats/netcdf/test__load_cube.py
+++ b/lib/iris/tests/unit/fileformats/netcdf/loader/test__load_cube.py
@@ -3,7 +3,7 @@
 # This file is part of Iris and is released under the LGPL license.
 # See COPYING and COPYING.LESSER in the root of the repository for full
 # licensing details.
-"""Unit tests for the `iris.fileformats.netcdf._load_cube` function."""
+"""Unit tests for the `iris.fileformats.netcdf.loader._load_cube` function."""

 # Import iris.tests first so that some things can be initialised before
 # importing anything else.
@@ -15,7 +15,7 @@
 from iris.coords import DimCoord
 import iris.fileformats.cf
-from iris.fileformats.netcdf import _load_cube
+from iris.fileformats.netcdf.loader import _load_cube


 class TestCoordAttributes(tests.IrisTest):
@@ -28,7 +28,7 @@ def _patcher(engine, cf, cf_group):
         engine.cube_parts["coordinates"] = coordinates

     def setUp(self):
-        this = "iris.fileformats.netcdf._assert_case_specific_facts"
+        this = "iris.fileformats.netcdf.loader._assert_case_specific_facts"
         patch = mock.patch(this, side_effect=self._patcher)
         patch.start()
         self.addCleanup(patch.stop)
@@ -112,7 +112,7 @@ def test_flag_pass_thru_multi(self):

 class TestCubeAttributes(tests.IrisTest):
     def setUp(self):
-        this = "iris.fileformats.netcdf._assert_case_specific_facts"
+        this = "iris.fileformats.netcdf.loader._assert_case_specific_facts"
         patch = mock.patch(this)
         patch.start()
         self.addCleanup(patch.stop)
diff --git a/lib/iris/tests/unit/fileformats/netcdf/test_load_cubes.py b/lib/iris/tests/unit/fileformats/netcdf/loader/test_load_cubes.py
similarity index 96%
rename from lib/iris/tests/unit/fileformats/netcdf/test_load_cubes.py
rename to lib/iris/tests/unit/fileformats/netcdf/loader/test_load_cubes.py
index c4c868cd59..e73147b10a 100644
--- a/lib/iris/tests/unit/fileformats/netcdf/test_load_cubes.py
+++ b/lib/iris/tests/unit/fileformats/netcdf/loader/test_load_cubes.py
@@ -4,7 +4,7 @@
 # See COPYING and COPYING.LESSER in the root of the repository for full
 # licensing details.
 """
-Unit tests for the :func:`iris.fileformats.netcdf.load_cubes` function.
+Unit tests for the :func:`iris.fileformats.netcdf.loader.load_cubes` function.

 todo: migrate the remaining unit-esque tests from iris.tests.test_netcdf,
 switching to use netcdf.load_cubes() instead of iris.load()/load_cube().
@@ -21,7 +21,7 @@
 from iris.coords import AncillaryVariable, CellMeasure
 from iris.experimental.ugrid import PARSE_UGRID_ON_LOAD, MeshCoord
-from iris.fileformats.netcdf import load_cubes, logger
+from iris.fileformats.netcdf.loader import load_cubes, logger

 # Import iris.tests first so that some things can be initialised before
 # importing anything else.
@@ -77,7 +77,7 @@ def test_ancillary_variables(self):
         """
         nc_path = cdl_to_nc(ref_cdl)

-        # Load with iris.fileformats.netcdf.load_cubes, and check expected content.
+        # Load with load_cubes, and check expected content.
         cubes = list(load_cubes(nc_path))
         self.assertEqual(len(cubes), 1)
         avs = cubes[0].ancillary_variables()
@@ -116,7 +116,7 @@ def test_status_flags(self):
         """
         nc_path = cdl_to_nc(ref_cdl)

-        # Load with iris.fileformats.netcdf.load_cubes, and check expected content.
+        # Load with load_cubes, and check expected content.
         cubes = list(load_cubes(nc_path))
         self.assertEqual(len(cubes), 1)
         avs = cubes[0].ancillary_variables()
@@ -163,7 +163,7 @@ def test_cell_measures(self):
         """
         nc_path = cdl_to_nc(ref_cdl)

-        # Load with iris.fileformats.netcdf.load_cubes, and check expected content.
+        # Load with load_cubes, and check expected content.
         cubes = list(load_cubes(nc_path))
         self.assertEqual(len(cubes), 1)
         cms = cubes[0].cell_measures()
@@ -207,7 +207,7 @@ def test_default_units(self):
         """
         nc_path = cdl_to_nc(ref_cdl)

-        # Load with iris.fileformats.netcdf.load_cubes, and check expected content.
+        # Load with load_cubes, and check expected content.
         cubes = list(load_cubes(nc_path))
         self.assertEqual(len(cubes), 1)
         self.assertEqual(cubes[0].units, as_unit("unknown"))
diff --git a/lib/iris/tests/unit/fileformats/netcdf/test_parse_cell_methods.py b/lib/iris/tests/unit/fileformats/netcdf/loader/test_parse_cell_methods.py
similarity index 97%
rename from lib/iris/tests/unit/fileformats/netcdf/test_parse_cell_methods.py
rename to lib/iris/tests/unit/fileformats/netcdf/loader/test_parse_cell_methods.py
index 9c4fbf622b..5eb93e76fb 100644
--- a/lib/iris/tests/unit/fileformats/netcdf/test_parse_cell_methods.py
+++ b/lib/iris/tests/unit/fileformats/netcdf/loader/test_parse_cell_methods.py
@@ -4,7 +4,7 @@
 # See COPYING and COPYING.LESSER in the root of the repository for full
 # licensing details.
 """
-Unit tests for :func:`iris.fileformats.netcdf.parse_cell_methods`.
+Unit tests for :func:`iris.fileformats.netcdf.loader.parse_cell_methods`.

 """

@@ -15,7 +15,7 @@
 from unittest import mock

 from iris.coords import CellMethod
-from iris.fileformats.netcdf import parse_cell_methods
+from iris.fileformats.netcdf.loader import parse_cell_methods


 class Test(tests.IrisTest):
diff --git a/lib/iris/tests/unit/fileformats/netcdf/saver/__init__.py b/lib/iris/tests/unit/fileformats/netcdf/saver/__init__.py
new file mode 100644
index 0000000000..a68d5fc5d0
--- /dev/null
+++ b/lib/iris/tests/unit/fileformats/netcdf/saver/__init__.py
@@ -0,0 +1,6 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""Unit tests for the :mod:`iris.fileformats.netcdf.saver` module."""
diff --git a/lib/iris/tests/unit/fileformats/netcdf/test_Saver.py b/lib/iris/tests/unit/fileformats/netcdf/saver/test_Saver.py
similarity index 99%
rename from lib/iris/tests/unit/fileformats/netcdf/test_Saver.py
rename to lib/iris/tests/unit/fileformats/netcdf/saver/test_Saver.py
index 2b0372dfa9..9ff6c396da 100644
--- a/lib/iris/tests/unit/fileformats/netcdf/test_Saver.py
+++ b/lib/iris/tests/unit/fileformats/netcdf/saver/test_Saver.py
@@ -32,7 +32,7 @@
 )
 from iris.coords import DimCoord
 from iris.cube import Cube
-from iris.fileformats.netcdf import Saver
+from iris.fileformats.netcdf.saver import Saver
 import iris.tests.stock as stock


@@ -185,7 +185,7 @@ def test_big_endian(self):

     def test_zlib(self):
         cube = self._simple_cube(">f4")
-        api = self.patch("iris.fileformats.netcdf.netCDF4")
+        api = self.patch("iris.fileformats.netcdf.saver.netCDF4")
         with Saver("/dummy/path", "NETCDF4") as saver:
             saver.write(cube, zlib=True)
         dataset = api.Dataset.return_value
diff --git a/lib/iris/tests/unit/fileformats/netcdf/test__FillValueMaskCheckAndStoreTarget.py b/lib/iris/tests/unit/fileformats/netcdf/saver/test__FillValueMaskCheckAndStoreTarget.py
similarity index 95%
rename from lib/iris/tests/unit/fileformats/netcdf/test__FillValueMaskCheckAndStoreTarget.py
rename to lib/iris/tests/unit/fileformats/netcdf/saver/test__FillValueMaskCheckAndStoreTarget.py
index 01ba7ff38d..43dcb25be9 100644
--- a/lib/iris/tests/unit/fileformats/netcdf/test__FillValueMaskCheckAndStoreTarget.py
+++ b/lib/iris/tests/unit/fileformats/netcdf/saver/test__FillValueMaskCheckAndStoreTarget.py
@@ -4,7 +4,7 @@
 # See COPYING and COPYING.LESSER in the root of the repository for full
 # licensing details.
 """
-Unit tests for the `iris.fileformats.netcdf._FillValueMaskCheckAndStoreTarget`
+Unit tests for the `iris.fileformats.netcdf.saver._FillValueMaskCheckAndStoreTarget`
 class.

 """

@@ -17,7 +17,7 @@
 import numpy as np

-from iris.fileformats.netcdf import _FillValueMaskCheckAndStoreTarget
+from iris.fileformats.netcdf.saver import _FillValueMaskCheckAndStoreTarget


 class Test__FillValueMaskCheckAndStoreTarget(tests.IrisTest):
diff --git a/lib/iris/tests/unit/fileformats/netcdf/test_save.py b/lib/iris/tests/unit/fileformats/netcdf/saver/test_save.py
similarity index 93%
rename from lib/iris/tests/unit/fileformats/netcdf/test_save.py
rename to lib/iris/tests/unit/fileformats/netcdf/saver/test_save.py
index 830d8c5e52..b1def41463 100644
--- a/lib/iris/tests/unit/fileformats/netcdf/test_save.py
+++ b/lib/iris/tests/unit/fileformats/netcdf/saver/test_save.py
@@ -3,7 +3,7 @@
 # This file is part of Iris and is released under the LGPL license.
 # See COPYING and COPYING.LESSER in the root of the repository for full
 # licensing details.
-"""Unit tests for the `iris.fileformats.netcdf.save` function."""
+"""Unit tests for the `iris.fileformats.netcdf.saver.save` function."""

 # Import iris.tests first so that some things can be initialised before
 # importing anything else.
@@ -17,7 +17,7 @@
 import iris
 from iris.coords import DimCoord
 from iris.cube import Cube, CubeList
-from iris.fileformats.netcdf import CF_CONVENTIONS_VERSION, save
+from iris.fileformats.netcdf.saver import CF_CONVENTIONS_VERSION, save
 from iris.tests.stock import lat_lon_cube


@@ -138,7 +138,7 @@ def test_None(self):
         # Test that when no fill_value argument is passed, the fill_value
         # argument to Saver.write is None or not present.
         cubes = self._make_cubes()
-        with mock.patch("iris.fileformats.netcdf.Saver") as Saver:
+        with mock.patch("iris.fileformats.netcdf.saver.Saver") as Saver:
             save(cubes, "dummy.nc")

             # Get the Saver.write mock
@@ -156,7 +156,7 @@ def test_single(self):
         # that value is passed to each call to Saver.write
         cubes = self._make_cubes()
         fill_value = 12345.0
-        with mock.patch("iris.fileformats.netcdf.Saver") as Saver:
+        with mock.patch("iris.fileformats.netcdf.saver.Saver") as Saver:
             save(cubes, "dummy.nc", fill_value=fill_value)

             # Get the Saver.write mock
@@ -173,7 +173,7 @@ def test_multiple(self):
         # each element is passed to separate calls to Saver.write
         cubes = self._make_cubes()
         fill_values = [123.0, 456.0, 789.0]
-        with mock.patch("iris.fileformats.netcdf.Saver") as Saver:
+        with mock.patch("iris.fileformats.netcdf.saver.Saver") as Saver:
             save(cubes, "dummy.nc", fill_value=fill_values)

             # Get the Saver.write mock
@@ -190,7 +190,7 @@ def test_single_string(self):
         # that value is passed to calls to Saver.write
         cube = Cube(["abc", "def", "hij"])
         fill_value = "xyz"
-        with mock.patch("iris.fileformats.netcdf.Saver") as Saver:
+        with mock.patch("iris.fileformats.netcdf.saver.Saver") as Saver:
             save(cube, "dummy.nc", fill_value=fill_value)

             # Get the Saver.write mock
@@ -206,7 +206,7 @@ def test_multi_wrong_length(self):
         # is passed as the fill_value argument, an error is raised
         cubes = self._make_cubes()
         fill_values = [1.0, 2.0, 3.0, 4.0]
-        with mock.patch("iris.fileformats.netcdf.Saver"):
+        with mock.patch("iris.fileformats.netcdf.saver.Saver"):
             with self.assertRaises(ValueError):
                 save(cubes, "dummy.nc", fill_value=fill_values)