diff --git a/.gitignore b/.gitignore index d0c032e8cf..98af3a6b93 100644 --- a/.gitignore +++ b/.gitignore @@ -33,6 +33,9 @@ htmlcov #Translations *.mo +#Sphinx +doc/source/_build/* + #Mr Developer .mr.developer.cfg diff --git a/.travis.yml b/.travis.yml index 0c6d28337b..438776b36d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -65,6 +65,7 @@ install: script: - pytest --cov=satpy satpy/tests - coverage run -a --source=satpy -m behave satpy/tests/features --tags=-download +- if [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then coverage run -a --source=satpy -m behave satpy/tests/features; fi after_success: - if [[ $PYTHON_VERSION == 3.8 ]]; then coveralls; codecov; fi deploy: diff --git a/doc/source/composites.rst b/doc/source/composites.rst index 70b727e9c7..7520e4a556 100644 --- a/doc/source/composites.rst +++ b/doc/source/composites.rst @@ -2,6 +2,14 @@ Composites ========== +Composites are defined as arrays of data that are created by processing and/or +combining one or multiple data arrays (prerequisites) together. + +Composites are generated in satpy using Compositor classes. The attributes of the +resulting composites are usually a combination of the prerequisites' attributes and +the key/values of the DataID used to identify it. + + Built-in Compositors ==================== @@ -430,7 +438,7 @@ Enhancing the images - palettize - three_d_effect - btemp_threshold - + .. todo:: Should this be in another file/page? diff --git a/doc/source/dev_guide/custom_reader.rst b/doc/source/dev_guide/custom_reader.rst index 1d779c9144..5eb4f68831 100644 --- a/doc/source/dev_guide/custom_reader.rst +++ b/doc/source/dev_guide/custom_reader.rst @@ -122,6 +122,10 @@ The parameters to provide in this section are: sensors: [seviri] reader: !!python/name:satpy.readers.yaml_reader.FileYAMLReader +Optionally, if you need to customize the `DataID` for this reader, you can provide the +relevant keys with a `data_identification_keys` item here. See the :doc:`satpy_internals` +section for more information. + .. _custom_reader_file_types_section: The ``file_types`` section @@ -476,7 +480,7 @@ needs to implement a few methods: in the example below. - the ``get_area_def`` method, that takes as single argument the - :class:`~satpy.dataset.DatasetID` for which we want + :class:`~satpy.dataset.DataID` for which we want the area. It should return a :class:`~pyresample.geometry.AreaDefinition` object. For data that cannot be geolocated with an area definition, the pixel coordinates will be loaded using the @@ -539,7 +543,7 @@ One way of implementing a file handler is shown below: self.nc = None def get_dataset(self, dataset_id, dataset_info): - if dataset_id.calibration != 'radiance': + if dataset_id['calibration'] != 'radiance': # TODO: implement calibration to reflectance or brightness temperature return if self.nc is None: diff --git a/doc/source/dev_guide/index.rst b/doc/source/dev_guide/index.rst index f41808fe69..f7e5892152 100644 --- a/doc/source/dev_guide/index.rst +++ b/doc/source/dev_guide/index.rst @@ -15,6 +15,7 @@ at the pages listed below. 
   xarray_migration
   custom_reader
   plugins
+  satpy_internals

Coding guidelines
=================
diff --git a/doc/source/dev_guide/satpy_internals.rst b/doc/source/dev_guide/satpy_internals.rst
new file mode 100644
index 0000000000..9bdbfb55cf
--- /dev/null
+++ b/doc/source/dev_guide/satpy_internals.rst
@@ -0,0 +1,157 @@
+======================================================
+ Satpy internal workings: having a look under the hood
+======================================================
+
+Querying and identifying data arrays
+====================================
+
+DataQuery
+---------
+
+The loading of data in Satpy is usually done by giving the name or the wavelength of the data arrays we are interested
+in. This way, the data array with the highest resolution and the highest level of calibration available is returned.
+
+However, in some cases, we need more control over the loading of the data arrays. The way to accomplish this is to load
+data arrays using queries, e.g.::
+
+  scn.load([DataQuery(name='channel1', resolution=400)])
+
+Here a data array with the name `channel1` and a resolution of `400` will be loaded if available.
+
+Note that None is not a valid value, and keys having a value set to None will simply be ignored.
+
+If one wants to use wildcards to query data, just provide `'*'`, e.g.::
+
+  scn.load([DataQuery(name='channel1', resolution=400, calibration='*')])
+
+Alternatively, one can provide a list as a parameter to query data, like this::
+
+  scn.load([DataQuery(name='channel1', resolution=[400, 800])])
+
+
+DataID
+------
+
+Satpy stores loaded data arrays in a special dictionary (`DatasetDict`) inside scene objects.
+In order to identify each data array uniquely, Satpy assigns an ID to each data array, which is then used as the key in
+the scene object. These IDs are of type `DataID` and are immutable. They are not supposed to be used by regular users and should only be
+created in special circumstances. Satpy should take care of creating and assigning these automatically. They are also stored in the
+`attrs` of each data array as `_satpy_id`.
+
+Default and custom metadata keys
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+One thing the user does have control over, however, is which metadata keys are relevant to which datasets. Satpy provides two default sets
+of metadata keys (or ID keys), one for regular imager bands, and the other for composites.
+The first one contains: name, wavelength, resolution, calibration, modifiers.
+The second one contains: name, resolution.
+
+As an example, here is the definition of the first one in YAML:
+
+  .. code-block:: yaml
+
+    data_identification_keys:
+      name:
+        required: true
+      wavelength:
+        type: !!python/name:satpy.dataset.WavelengthRange
+      resolution:
+        transitive: true
+      calibration:
+        enum:
+          - reflectance
+          - brightness_temperature
+          - radiance
+          - counts
+      modifiers:
+        required: true
+        default: []
+        type: !!python/name:satpy.dataset.ModifierTuple
+
+To create a new set, the user can define it in the relevant YAML file.
+It has to be provided in the header of the reader configuration file, under the `reader`
+section, as `data_identification_keys`. Each key under this is the name of a
+metadata key that will be used to find the relevant information in the attributes of the data
+arrays. Under each of these, a few options are available:
+
+  - `required`: whether the item is required. False by default.
+  - `type`: the type to use. More on this further down.
+  - `enum`: if the item has to be limited to a finite number of options, an enum can be used.
+    Be sure to place the options in the order of preference, with the most desirable option on top.
+  - `default`: the default value to assign to the item if nothing (or None) is provided. If this
+    option isn't provided, the key will simply be omitted if it is not present in the attrs or if it
+    is None. The default value will be passed to the type's `convert` method if available.
+  - `transitive`: whether the key is to be passed on when looking for dependencies. Here for example,
+    a composite that has to be at a certain resolution will pass this resolution requirement on to its
+    dependencies.
+
+
+If the definition of the metadata keys needs to be done in Python rather than in a YAML file, it is
+a dictionary very similar to the YAML code. Here is the same example as above in Python:
+
+  .. code-block:: python
+
+    from satpy.dataset import WavelengthRange, ModifierTuple
+
+    id_keys_config = {'name': {
+                          'required': True,
+                      },
+                      'wavelength': {
+                          'type': WavelengthRange,
+                      },
+                      'resolution': {
+                          'transitive': True,
+                      },
+                      'calibration': {
+                          'enum': [
+                              'reflectance',
+                              'brightness_temperature',
+                              'radiance',
+                              'counts'
+                          ]
+                      },
+                      'modifiers': {
+                          'required': True,
+                          'default': ModifierTuple(),
+                          'type': ModifierTuple,
+                      },
+                      }
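+
+For illustration, here is a minimal sketch of how Satpy interprets such a configuration when it
+builds an ID. Remember that in normal use Satpy creates `DataID` instances automatically; the
+channel name and values below are only assumptions made for this example, reusing the
+`id_keys_config` dictionary defined above:
+
+  .. code-block:: python
+
+    from satpy.dataset import DataID
+
+    # Values are converted according to the configuration: the wavelength tuple
+    # becomes a WavelengthRange, the calibration string becomes a member of the
+    # configured enum, and modifiers falls back to its default empty ModifierTuple.
+    did = DataID(id_keys_config, name='channel1',
+                 wavelength=(10.3, 10.8, 11.3),
+                 resolution=500,
+                 calibration='brightness_temperature')
+
+    print(did['resolution'])  # 500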
+
+Types
+~~~~~
+Types are classes that implement a type to be used as a value for metadata in the `DataID`. They have
+to implement a few methods:
+
+ - a `convert` class method that returns its argument as an instance of the class
+ - `__hash__`, `__eq__` and `__ne__` methods
+ - a `distance` method that tells how "far" an instance of this class is from its argument.
+
+An example of such a class is the :class:`WavelengthRange <satpy.dataset.WavelengthRange>` class.
+Through its implementation, it allows us for example to use a wavelength in a query to find out which
+DataID in a list has its central wavelength closest to that query.
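+
+As an illustration of this interface, here is a minimal, hypothetical type for pressure levels.
+The class name and its semantics are made up for this example; see the implementation of
+:class:`WavelengthRange <satpy.dataset.WavelengthRange>` for a real, complete reference:
+
+  .. code-block:: python
+
+    class PressureLevel(float):
+        """Hypothetical value type for a pressure level ID key, in hPa."""
+
+        @classmethod
+        def convert(cls, value):
+            """Return *value* as an instance of this class."""
+            return cls(value)
+
+        def __eq__(self, other):
+            return float.__eq__(self, other)
+
+        def __ne__(self, other):
+            return not self == other
+
+        def __hash__(self):
+            return float.__hash__(self)
+
+        def distance(self, value):
+            """Tell how 'far' *value* is from this level, assuming *value* is numeric."""
+            return abs(float(value) - float(self))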
+
+
+DataID and DataQuery interactions
+=================================
+
+Different DataIDs and DataQuerys can have different metadata items defined. As such,
+equality between different instances of these classes, and across the classes, is defined
+as equality of the sorted key/value pairs shared between the instances.
+If a DataQuery has one or more values set to `'*'`, the corresponding key/value pairs will be omitted from the comparison.
+Instances sharing no keys will not be equal.
+
+
+Breaking changes from DatasetIDs
+================================
+
+ - The way to access values from the DataID and DataQuery is through getitem: `my_dataid['resolution']`
+ - For checking if a dataset is loaded, use `'mydataset' in scene`, as `'mydataset' in scene.keys()` will always return `False`:
+   the `DatasetDict` instance only supports `DataID` as the key type.
+
+Creating DataID for tests
+=========================
+
+Sometimes, it is useful to create `DataID` instances for testing purposes. For these cases, the `satpy.tests.utils` module
+now has a `make_dataid` function that can be used just for this::
+
+    from satpy.tests.utils import make_dataid
+    did = make_dataid(name='camembert', modifiers=('runny',))
diff --git a/doc/source/multiscene.rst b/doc/source/multiscene.rst
index 1f1778ccab..0de8cf1cea 100644
--- a/doc/source/multiscene.rst
+++ b/doc/source/multiscene.rst
@@ -110,9 +110,9 @@ roughly the same time.
 
 First, create scenes and load datasets individually:
 
 Now create a ``MultiScene`` and group the three similar IR channels together:
 
-    >>> from satpy import MultiScene, DatasetID
+    >>> from satpy import MultiScene, DataQuery
     >>> mscn = MultiScene([h8_scene, g16_scene, met10_scene])
-    >>> groups = {DatasetID('IR_group', wavelength=(10, 11, 12)): ['B13', 'C13', 'IR_108']}
+    >>> groups = {DataQuery(name='IR_group', wavelength=(10, 11, 12)): ['B13', 'C13', 'IR_108']}
     >>> mscn.group(groups)
 
 Finally, resample the datasets to a common grid and blend them together:
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index d98e910dcf..8941afcc0f 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -48,11 +48,12 @@ For help on developing with dask and xarray see
 :doc:`dev_guide/xarray_migration` or the documentation for the specific
 project.
 
-To uniquely identify ``DataArray`` objects Satpy uses `DatasetID`. A
-``DatasetID`` consists of various pieces of available metadata. This usually
-includes `name` and `wavelength` as identifying metadata, but also includes
+To uniquely identify ``DataArray`` objects Satpy uses `DataID`. A
+``DataID`` consists of various pieces of available metadata. This usually
+includes `name` and `wavelength` as identifying metadata, but can also include
 `resolution`, `calibration`, `polarization`, and additional `modifiers`
-to further distinguish one dataset from another.
+to further distinguish one dataset from another. For more information on `DataID`
+objects, have a look at :doc:`dev_guide/satpy_internals`.
 
 .. warning::
diff --git a/doc/source/readers.rst b/doc/source/readers.rst
index f5da705420..ed64d55ade 100644
--- a/doc/source/readers.rst
+++ b/doc/source/readers.rst
@@ -47,10 +47,10 @@ to them. By default Satpy will provide the version of the dataset with the
 highest resolution and the highest level of calibration (brightness
 temperature or reflectance over radiance). It is also possible to request one
 of these exact versions of a dataset by using the
-:class:`~satpy.dataset.DatasetID` class::
+:class:`~satpy.dataset.DataQuery` class::
 
-    >>> from satpy import DatasetID
-    >>> my_channel_id = DatasetID(name='IR_016', calibration='radiance')
+    >>> from satpy import DataQuery
+    >>> my_channel_id = DataQuery(name='IR_016', calibration='radiance')
     >>> scn.load([my_channel_id])
     >>> print(scn['IR_016'])
 
@@ -93,7 +93,7 @@ load the datasets using e.g.::
 
 If a dataset could not be loaded there is no exception raised. You must check
 the :meth:`scn.missing_datasets <satpy.scene.Scene.missing_datasets>`
-property for any ``DatasetID`` that could not be loaded.
+property for any ``DataID`` that could not be loaded.
 
 To find out what datasets are available from a reader from the files that were
 provided to the ``Scene`` use
@@ -137,8 +137,7 @@ Metadata
 The datasets held by a scene also provide vital metadata such as dataset name, units, observation
 time etc. The following attributes are standardized across all readers:
 
-* ``name``, ``wavelength``, ``resolution``, ``polarization``, ``calibration``, ``level``,
-  ``modifiers``: See :class:`satpy.dataset.DatasetID`.
+* ``name`` and other identifying metadata keys: See :doc:`dev_guide/satpy_internals`.
 * ``start_time``: Left boundary of the time interval covered by the dataset.
 * ``end_time``: Right boundary of the time interval covered by the dataset.
* ``area``: :class:`~pyresample.geometry.AreaDefinition` or diff --git a/satpy/__init__.py b/satpy/__init__.py index 8c06ac60de..b39973df3a 100644 --- a/satpy/__init__.py +++ b/satpy/__init__.py @@ -15,8 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""Satpy Package initializer. -""" +"""Satpy Package initializer.""" import os from pkg_resources import get_distribution, DistributionNotFound @@ -47,7 +46,7 @@ CALIBRATION_ORDER = {cal: idx for idx, cal in enumerate(CALIBRATION_ORDER)} from satpy.utils import get_logger # noqa -from satpy.dataset import DatasetID, DATASET_KEYS # noqa +from satpy.dataset import DataID, DataQuery # noqa from satpy.readers import (DatasetDict, find_files_and_readers, # noqa available_readers) # noqa from satpy.writers import available_writers # noqa diff --git a/satpy/composites/__init__.py b/satpy/composites/__init__.py index acf7622a13..e361fcb164 100644 --- a/satpy/composites/__init__.py +++ b/satpy/composites/__init__.py @@ -35,7 +35,7 @@ from satpy.config import CONFIG_PATH, config_search_paths, recursive_dict_update from satpy.config import get_environ_ancpath, get_entry_points_config_dirs -from satpy.dataset import DATASET_KEYS, DatasetID, MetadataObject, combine_metadata +from satpy.dataset import DataID, DataQuery, MetadataObject, combine_metadata, minimal_default_keys_config from satpy.readers import DatasetDict from satpy.utils import sunzen_corr_cos, atmospheric_path_length_correction, get_satpos from satpy.writers import get_enhanced_image @@ -82,6 +82,7 @@ def __init__(self, ppp_config_dir=None): self.modifiers = {} self.compositors = {} self.ppp_config_dir = ppp_config_dir + self.ds_id_keys = {} def load_sensor_composites(self, sensor_name): """Load all compositor configs for the provided sensor.""" @@ -149,8 +150,15 @@ def load_compositors(self, sensor_names): return comps, mods def _process_composite_config(self, composite_name, conf, - composite_type, sensor_id, composite_config, **kwargs): - + composite_type, sensor_id, sensor_deps, composite_config, **kwargs): + try: + id_keys = conf['composite_identification_keys'] + except KeyError: + try: + id_keys = self.ds_id_keys[sensor_deps[-1]] + except IndexError: + id_keys = minimal_default_keys_config + self.ds_id_keys[sensor_id] = id_keys compositors = self.compositors[sensor_id] modifiers = self.modifiers[sensor_id] try: @@ -174,16 +182,18 @@ def _process_composite_config(self, composite_name, conf, sub_comp_name = '_' + composite_name + '_dep_{}'.format(dep_num) dep_num += 1 # Minimal composite config - sub_conf = {composite_type: {sub_comp_name: item}} + sub_conf = {composite_type: {sub_comp_name: item}, + 'composite_identification_keys': minimal_default_keys_config + } self._process_composite_config( sub_comp_name, sub_conf, composite_type, sensor_id, - composite_config, **kwargs) - else: - # we want this prerequisite to act as a query with - # 'modifiers' being None otherwise it will be an empty - # tuple - item.setdefault('modifiers', None) - key = DatasetID.from_dict(item) + sensor_deps, composite_config, **kwargs) + key_item = item.copy() + key_item.pop('prerequisites', None) + key_item.pop('optional_prerequisites', None) + if 'modifiers' in key_item: + key_item['modifiers'] = tuple(key_item['modifiers']) + key = DataQuery.from_dict(key_item) prereqs.append(key) else: prereqs.append(item) @@ -191,8 +201,8 @@ def _process_composite_config(self, composite_name, conf, if composite_type == 'composites': 
options.update(**kwargs) - key = DatasetID.from_dict(options) - comp = loader(**options) + key = DataID(id_keys, **options) + comp = loader(_satpy_id=key, **options) compositors[key] = comp elif composite_type == 'modifiers': modifiers[composite_name] = loader, options @@ -231,7 +241,8 @@ def _load_config(self, composite_configs, **kwargs): continue for composite_name in conf[composite_type]: self._process_composite_config(composite_name, conf, - composite_type, sensor_id, composite_config, **kwargs) + composite_type, sensor_id, + sensor_deps, composite_config, **kwargs) def check_times(projectables): @@ -302,7 +313,12 @@ def apply_modifier_info(self, origin, destination): """Apply the modifier info from *origin* to *destination*.""" o = getattr(origin, 'attrs', origin) d = getattr(destination, 'attrs', destination) - for k in DATASET_KEYS: + + try: + dataset_keys = self.attrs['_satpy_id'].id_keys.keys() + except KeyError: + dataset_keys = ['name', 'modifiers'] + for k in dataset_keys: if k == 'modifiers': d[k] = self.attrs[k] elif d.get(k) is None: diff --git a/satpy/composites/sar.py b/satpy/composites/sar.py index 19c0705e6b..f241daa879 100644 --- a/satpy/composites/sar.py +++ b/satpy/composites/sar.py @@ -15,8 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""Composite classes for the VIIRS instrument. -""" +"""Composite classes for the VIIRS instrument.""" import logging @@ -62,7 +61,6 @@ class SARIceLegacy(GenericCompositor): def __call__(self, projectables, *args, **kwargs): """Create the SAR RGB composite.""" - (mhh, mhv) = projectables green = overlay(mhh, mhv) green.attrs = combine_metadata(mhh, mhv) @@ -75,7 +73,6 @@ class SARRGB(GenericCompositor): def __call__(self, projectables, *args, **kwargs): """Create the SAR RGB composite.""" - (mhh, mhv) = projectables green = overlay(mhh, mhv) green.attrs = combine_metadata(mhh, mhv) diff --git a/satpy/dataset.py b/satpy/dataset.py index 77c1434836..129c49523f 100644 --- a/satpy/dataset.py +++ b/satpy/dataset.py @@ -15,17 +15,214 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""Dataset objects.""" +"""Dataset identifying objects.""" import logging import numbers +import warnings from collections import namedtuple from collections.abc import Collection +from contextlib import suppress +from copy import copy, deepcopy from datetime import datetime +from enum import IntEnum, Enum + +import numpy as np logger = logging.getLogger(__name__) +class ValueList(IntEnum): + """A static value list.""" + + @classmethod + def convert(cls, value): + """Convert value to an instance of this class.""" + try: + return cls[value] + except KeyError: + raise ValueError('{} invalid value for {}'.format(value, cls)) + + def __eq__(self, other): + """Check equality.""" + return self.name == other + + def __ne__(self, other): + """Check non-equality.""" + return self.name != other + + def __hash__(self): + """Hash the object.""" + return hash(self.name) + + def __repr__(self): + """Represent the values.""" + return '<' + str(self) + '>' + + +try: + wlklass = namedtuple("WavelengthRange", "min central max unit", defaults=('µm',)) +except NameError: # python 3.6 + wlklass = namedtuple("WavelengthRange", "min central max unit") + wlklass.__new__.__defaults__ = ('µm',) + + +class WavelengthRange(wlklass): + """A named tuple for wavelength ranges. 
+ + The elements of the range are min, central and max values, and optionally a unit + (defaults to µm). No clever unit conversion is done here, it's just used for checking + that two ranges are comparable. + """ + + def __eq__(self, other): + """Return if two wavelengths are equal. + + Args: + other (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl + + Return: + True if other is a scalar and min <= other <= max, or if other is + a tuple equal to self, False otherwise. + """ + if other is None: + return False + elif isinstance(other, numbers.Number): + return other in self + elif isinstance(other, (tuple, list)) and len(other) == 3: + return self[:3] == other + return super().__eq__(other) + + def __ne__(self, other): + """Return the opposite of `__eq__`.""" + return not self == other + + def __lt__(self, other): + """Compare to another wavelength.""" + if other is None: + return False + return super().__lt__(other) + + def __gt__(self, other): + """Compare to another wavelength.""" + if other is None: + return True + return super().__gt__(other) + + def __hash__(self): + """Hash this tuple.""" + return tuple.__hash__(self) + + def __str__(self): + """Format for print out.""" + return "{0.central} {0.unit} ({0.min}-{0.max} {0.unit})".format(self) + + def __contains__(self, other): + """Check if this range contains *other*.""" + if other is None: + return False + elif isinstance(other, numbers.Number): + return self.min <= other <= self.max + with suppress(AttributeError): + if self.unit != other.unit: + raise NotImplementedError("Can't compare wavelength ranges with different units.") + return self.min <= other.min and self.max >= other.max + return False + + def distance(self, value): + """Get the distance from value.""" + if self == value: + try: + return abs(value.central - self.central) + except AttributeError: + if isinstance(value, (tuple, list)): + return abs(value[1] - self.central) + return abs(value - self.central) + else: + return np.inf + + @classmethod + def convert(cls, wl): + """Convert `wl` to this type if possible.""" + if isinstance(wl, (tuple, list)): + return cls(*wl) + return wl + + +class ModifierTuple(tuple): + """A tuple holder for modifiers.""" + + @classmethod + def convert(cls, modifiers): + """Convert `modifiers` to this type if possible.""" + if modifiers is None: + return None + elif not isinstance(modifiers, (cls, tuple, list)): + raise TypeError("'DataID' modifiers must be a tuple or None, " + "not {}".format(type(modifiers))) + return cls(modifiers) + + def __eq__(self, other): + """Check equality.""" + if isinstance(other, list): + other = tuple(other) + return super().__eq__(other) + + def __ne__(self, other): + """Check non-equality.""" + if isinstance(other, list): + other = tuple(other) + return super().__ne__(other) + + def __hash__(self): + """Hash this tuple.""" + return tuple.__hash__(self) + + +#: Default ID keys DataArrays. +default_id_keys_config = {'name': { + 'required': True, + }, + 'wavelength': { + 'type': WavelengthRange, + }, + 'resolution': { + 'transitive': True, + }, + 'calibration': { + 'enum': [ + 'reflectance', + 'brightness_temperature', + 'radiance', + 'counts' + ] + }, + 'modifiers': { + 'default': ModifierTuple(), + 'type': ModifierTuple, + }, + } + + +#: Default ID keys for coordinate DataArrays. +default_co_keys_config = {'name': { + 'required': True, + }, + 'resolution': { + 'transitive': True, + } + } + +#: Minimal ID keys for DataArrays, for example composites. 
+minimal_default_keys_config = {'name': { + 'required': True, + }, + 'resolution': { + 'transitive': True, + } + } + + class MetadataObject(object): """A general metadata object.""" @@ -35,8 +232,12 @@ def __init__(self, **attributes): @property def id(self): - """Return the DatasetID of the object.""" - return DatasetID.from_dict(self.attrs) + """Return the DataID of the object.""" + try: + return self.attrs['_satpy_id'] + except KeyError: + id_keys = self.attrs.get('_satpy_id_keys', minimal_default_keys_config) + return DataID(id_keys, **self.attrs) def average_datetimes(dt_list): @@ -107,6 +308,19 @@ def combine_metadata(*metadata_objects, **kwargs): return shared_info +def get_keys_from_config(common_id_keys, config): + """Gather keys for a new DataID from the ones available in configured dataset.""" + id_keys = {} + for key, val in common_id_keys.items(): + if key in config: + id_keys[key] = val + elif val is not None and (val.get('required') is True or val.get('default') is not None): + id_keys[key] = val + if not id_keys: + raise ValueError('Metadata does not contain enough information to create a DataID.') + return id_keys + + def _share_metadata_key(k, values, average_times): """Combine metadata. Helper for combine_metadata, decide if key is shared.""" any_arrays = any([hasattr(val, "__array__") for val in values]) @@ -149,163 +363,415 @@ def _share_metadata_key_list_arrays(values): return True -DATASET_KEYS = ("name", "wavelength", "resolution", "polarization", - "calibration", "level", "modifiers") -DatasetID = namedtuple("DatasetID", " ".join(DATASET_KEYS)) -DatasetID.__new__.__defaults__ = (None, None, None, None, None, None, tuple()) +class DataID(dict): + """Identifier for all `DataArray` objects. + DataID is a dict that holds identifying and classifying + information about a DataArray. + """ -class DatasetID(DatasetID): - """Identifier for all `Dataset` objects. - - DatasetID is a namedtuple that holds identifying and classifying - information about a Dataset. There are two identifying elements, - ``name`` and ``wavelength``. These can be used to generically refer to a - Dataset. The other elements of a DatasetID are meant to further - distinguish a Dataset from the possible variations it may have. For - example multiple Datasets may be called by one ``name`` but may exist - in multiple resolutions or with different calibrations such as "radiance" - and "reflectance". If an element is `None` then it is considered not - applicable. + def __init__(self, id_keys, **keyval_dict): + """Init the DataID. - A DatasetID can also be used in Satpy to query for a Dataset. This way - a fully qualified DatasetID can be found even if some of the DatasetID - elements are unknown. In this case a `None` signifies something that is - unknown or not applicable to the requested Dataset. + The *id_keys* dictionary has to be formed as described in :doc:`satpy_internals`. + The other keyword arguments are values to be assigned to the keys. Note that + `None` isn't a valid value and will simply be ignored. + """ + self._hash = None + self._orig_id_keys = id_keys + self._id_keys = self.fix_id_keys(id_keys or {}) + if keyval_dict: + curated = self.convert_dict(keyval_dict) + else: + curated = {} + super(DataID, self).__init__(curated) - Args: - name (str): String identifier for the Dataset - wavelength (float, tuple): Single float wavelength when querying for - a Dataset. Otherwise 3-element tuple of - floats specifying the minimum, nominal, - and maximum wavelength for a Dataset. 
- `None` if not applicable. - resolution (int, float): Per data pixel/area resolution. If resolution - varies across the Dataset then nadir view - resolution is preferred. Usually this is in - meters, but for lon/lat gridded data angle - degrees may be used. - polarization (str): 'V' or 'H' polarizations of a microwave channel. - `None` if not applicable. - calibration (str): String identifying the calibration level of the - Dataset (ex. 'radiance', 'reflectance', etc). - `None` if not applicable. - level (int, float): Pressure/altitude level of the dataset. This is - typically in hPa, but may be in inverse meters - for altitude datasets (1/meters). - modifiers (tuple): Tuple of strings identifying what corrections or - other modifications have been performed on this - Dataset (ex. 'sunz_corrected', 'rayleigh_corrected', - etc). `None` or empty tuple if not applicable. + @staticmethod + def fix_id_keys(id_keys): + """Flesh out enums in the id keys as gotten from a config.""" + new_id_keys = id_keys.copy() + for key, val in id_keys.items(): + if not val: + continue + if 'enum' in val and 'type' in val: + raise ValueError('Cannot have both type and enum for the same id key.') + new_val = copy(val) + if 'enum' in val: + new_val['type'] = ValueList(key, ' '.join(new_val.pop('enum'))) + new_id_keys[key] = new_val + return new_id_keys + + def convert_dict(self, keyvals): + """Convert a dictionary's values to the types defined in this object's id_keys.""" + curated = {} + if not keyvals: + return curated + for key, val in self._id_keys.items(): + if val is not None: + if key in keyvals or val.get('default') is not None or val.get('required'): + curated_val = keyvals.get(key, val.get('default')) + if 'required' in val and curated_val is None: + raise ValueError('Required field {} missing.'.format(key)) + if 'type' in val: + curated[key] = val['type'].convert(curated_val) + elif curated_val is not None: + curated[key] = curated_val + else: + try: + curated_val = keyvals[key] + except KeyError: + pass + else: + if curated_val is not None: + curated[key] = curated_val + return curated - """ + @classmethod + def _unpickle(cls, id_keys, keyval): + """Create a new instance of the DataID after pickling.""" + return cls(id_keys, **keyval) - def __new__(cls, *args, **kwargs): - """Create new DatasetID.""" - ret = super(DatasetID, cls).__new__(cls, *args, **kwargs) - if ret.modifiers is not None and not isinstance(ret.modifiers, tuple): - raise TypeError("'DatasetID' modifiers must be a tuple or None, " - "not {}".format(type(ret.modifiers))) - return ret + def __reduce__(self): + """Reduce the object for pickling.""" + return (self._unpickle, (self._orig_id_keys, self.to_dict())) - @staticmethod - def name_match(a, b): - """Return if two string names are equal. 
+ def from_dict(self, keyvals): + """Create a DataID from a dictionary.""" + return self.__class__(self._id_keys, **keyvals) - Args: - a (str): DatasetID.name or other string - b (str): DatasetID.name or other string + @classmethod + def from_dataarray(cls, array, default_keys=minimal_default_keys_config): + """Get the DataID using the dataarray attributes.""" + if '_satpy_id' in array.attrs: + return array.attrs['_satpy_id'] + return cls.new_id_from_dataarray(array, default_keys) - """ - return a == b + @classmethod + def new_id_from_dataarray(cls, array, default_keys=minimal_default_keys_config): + """Create a new DataID from a dataarray's attributes.""" + try: + id_keys = array.attrs['_satpy_id'].id_keys + except KeyError: + id_keys = array.attrs.get('_satpy_id_keys', default_keys) + return cls(id_keys, **array.attrs) - @staticmethod - def wavelength_match(a, b): - """Return if two wavelengths are equal. + @property + def id_keys(self): + """Get the id_keys.""" + return deepcopy(self._id_keys) + + def create_dep_filter(self, query): + """Remove the required fields from *query*.""" + try: + new_query = query.to_dict() + except AttributeError: + new_query = query.copy() + for key, val in self._id_keys.items(): + if val and (val.get('transitive') is not True): + new_query.pop(key, None) + return DataQuery.from_dict(new_query) + + def _asdict(self): + return dict(self.items()) + + def to_dict(self): + """Convert the ID to a dict.""" + res_dict = dict() + for key, value in self._asdict().items(): + if isinstance(value, Enum): + res_dict[key] = value.name + else: + res_dict[key] = value + return res_dict + + def __getattr__(self, key): + """Support old syntax for getting items.""" + if key in self._id_keys: + warnings.warn('Attribute access to DataIDs is deprecated, use key access instead.', + stacklevel=2) + return self[key] + else: + return super().__getattr__(key) - Args: - a (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl - b (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl + def __deepcopy__(self, memo=None): + """Copy this object. + Returns self as it's immutable. """ - if type(a) == (type(b) or - isinstance(a, numbers.Number) and - isinstance(b, numbers.Number)): - return a == b - elif a is None or b is None: - return False - elif isinstance(a, (list, tuple)) and len(a) == 3: - return a[0] <= b <= a[2] - elif isinstance(b, (list, tuple)) and len(b) == 3: - return b[0] <= a <= b[2] - else: - raise ValueError("Can only compare wavelengths of length 1 or 3") + return self - def _comparable(self): - """Get a comparable version of the DatasetID. + def __copy__(self): + """Copy this object. - Without this DatasetIDs often raise an exception when compared in - Python 3 due to None not being comparable with other types. + Returns self as it's immutable. 
""" - return self._replace( - name='' if self.name is None else self.name, - wavelength=tuple() if self.wavelength is None else self.wavelength, - resolution=0 if self.resolution is None else self.resolution, - polarization='' if self.polarization is None else self.polarization, - calibration='' if self.calibration is None else self.calibration, - ) + return self + + def __repr__(self): + """Represent the id.""" + items = ("{}={}".format(key, repr(val)) for key, val in self.items()) + return self.__class__.__name__ + "(" + ", ".join(items) + ")" + + def _replace(self, **kwargs): + """Make a new instance with replaced items.""" + info = dict(self.items()) + info.update(kwargs) + return self.from_dict(info) + + def __hash__(self): + """Hash the object.""" + if self._hash is None: + self._hash = hash(tuple(sorted(self.items()))) + return self._hash + + def _immutable(self, *args, **kws): + """Raise and error.""" + raise TypeError('Cannot change a DataID') def __lt__(self, other): - """Less than.""" - """Compare DatasetIDs with special handling of `None` values""" - # modifiers should never be None when sorted, should be tuples - if isinstance(other, DatasetID): - other = other._comparable() - return super(DatasetID, self._comparable()).__lt__(other) + """Check lesser than.""" + list_self, list_other = [], [] + for key in self._id_keys: + if key not in self and key not in other: + continue + elif key in self and key in other: + list_self.append(self[key]) + list_other.append(other[key]) + elif key in self: + val = self[key] + list_self.append(val) + if isinstance(val, numbers.Number): + list_other.append(0) + elif isinstance(val, str): + list_other.append('') + elif isinstance(val, tuple): + list_other.append(tuple()) + else: + raise NotImplementedError("Don't know how to generalize " + str(type(val))) + elif key in other: + val = other[key] + list_other.append(val) + if isinstance(val, numbers.Number): + list_self.append(0) + elif isinstance(val, str): + list_self.append('') + elif isinstance(val, tuple): + list_self.append(tuple()) + else: + raise NotImplementedError("Don't know how to generalize " + str(type(val))) + return tuple(list_self) < tuple(list_other) + + __setitem__ = _immutable + __delitem__ = _immutable + pop = _immutable + popitem = _immutable + clear = _immutable + update = _immutable + setdefault = _immutable + + +class DataQuery: + """The data query object. + + A DataQuery can be used in Satpy to query for a Dataset. This way + a fully qualified DataID can be found even if some of the DataID + elements are unknown. In this case a `*` signifies something that is + unknown or not applicable to the requested Dataset. + """ + + def __init__(self, **kwargs): + """Initialize the query.""" + self._dict = kwargs.copy() + self._fields = tuple(self._dict.keys()) + self._values = tuple(self._dict.values()) + + def __getitem__(self, key): + """Get an item.""" + return self._dict[key] def __eq__(self, other): - """Check for equality.""" - if isinstance(other, str): - return self.name_match(self.name, other) - elif isinstance(other, numbers.Number) or \ - isinstance(other, (tuple, list)) and len(other) == 3: - return self.wavelength_match(self.wavelength, other) - else: - return super(DatasetID, self).__eq__(other) + """Compare the DataQuerys. + + A DataQuery is considered equal to another DataQuery or DataID + if they have common keys that have equal values. 
+ """ + sdict = self._asdict() + try: + odict = other._asdict() + except AttributeError: + return False + common_keys = False + for key, val in sdict.items(): + if key in odict: + common_keys = True + if odict[key] != val and val is not None: + return False + return common_keys def __hash__(self): - """Generate the hash of the ID.""" - return tuple.__hash__(self) + """Hash.""" + fields = [] + values = [] + for field, value in sorted(self._dict.items()): + if value != '*': + fields.append(field) + if isinstance(value, (list, set)): + value = tuple(value) + values.append(value) + return hash(tuple(zip(fields, values))) + + def get(self, key, default=None): + """Get an item.""" + return self._dict.get(key, default) @classmethod - def from_dict(cls, d, **kwargs): + def from_dict(cls, the_dict): """Convert a dict to an ID.""" - args = [] - for k in DATASET_KEYS: - val = kwargs.get(k, d.get(k)) - # force modifiers to tuple - if k == 'modifiers' and val is not None: - val = tuple(val) - args.append(val) + return cls(**the_dict) - return cls(*args) + def _asdict(self): + return dict(zip(self._fields, self._values)) def to_dict(self, trim=True): """Convert the ID to a dict.""" if trim: return self._to_trimmed_dict() else: - return dict(zip(DATASET_KEYS, self)) + return self._asdict() def _to_trimmed_dict(self): - return {key: getattr(self, key) for key in DATASET_KEYS - if getattr(self, key) is not None} + return {key: val for key, val in self._dict.items() + if val != '*'} + + def __repr__(self): + """Represent the query.""" + items = ("{}={}".format(key, repr(val)) for key, val in zip(self._fields, self._values)) + return self.__class__.__name__ + "(" + ", ".join(items) + ")" + def filter_dataids(self, dataid_container): + """Filter DataIDs based on this query.""" + keys = list(filter(self._match_dataid, dataid_container)) -def create_filtered_dsid(dataset_key, **dfilter): - """Create a DatasetID matching *dataset_key* and *dfilter*. + return keys - If a proprety is specified in both *dataset_key* and *dfilter*, the former + def _match_dataid(self, dataid): + """Match the dataid with the current query.""" + if self._shares_required_keys(dataid): + keys_to_check = set(dataid.keys()) & set(self._fields) + else: + keys_to_check = set(dataid._id_keys.keys()) & set(self._fields) + if not keys_to_check: + return False + return all(self._match_query_value(key, dataid.get(key)) for key in keys_to_check) + + def _shares_required_keys(self, dataid): + """Check if dataid shares required keys with the current query.""" + for key, val in dataid._id_keys.items(): + try: + if val.get('required', False): + if key in self._fields: + return True + except AttributeError: + continue + return False + + def _match_query_value(self, key, id_val): + val = self._dict[key] + if val == '*': + return True + if isinstance(id_val, tuple) and isinstance(val, (tuple, list)): + return tuple(val) == id_val + if not isinstance(val, list): + val = [val] + return id_val in val + + def sort_dataids(self, dataids): + """Sort the DataIDs based on this query. + + Returns the sorted dataids and the list of distances. + + The sorting is performed based on the types of the keys to search on + (as they are defined in the DataIDs from `dataids`). + If that type defines a `distance` method, then it is used to find how + 'far' the DataID is from the current query. + If the type is a number, a simple subtraction is performed. + For other types, the distance is 0 if the values are identical, np.inf + otherwise. 
+ + For example, with the default DataID, we use the following criteria: + + 1. Central wavelength is nearest to the `key` wavelength if + specified. + 2. Least modified dataset if `modifiers` is `None` in `key`. + Otherwise, the modifiers are ignored. + 3. Highest calibration if `calibration` is `None` in `key`. + Calibration priority is chosen by `satpy.CALIBRATION_ORDER`. + 4. Best resolution (smallest number) if `resolution` is `None` + in `key`. Otherwise, the resolution is ignored. + + """ + distances = [] + sorted_dataids = [] + big_distance = 100000 + keys = set(self._dict.keys()) + for dataid in dataids: + keys |= set(dataid.keys()) + for dataid in sorted(dataids): + sorted_dataids.append(dataid) + distance = 0 + for key in keys: + val = self._dict.get(key, '*') + if val == '*': + try: + # for enums + distance += dataid.get(key).value + except AttributeError: + if isinstance(dataid.get(key), numbers.Number): + distance += dataid.get(key) + elif isinstance(dataid.get(key), tuple): + distance += len(dataid.get(key)) + else: + try: + dataid_val = dataid[key] + except KeyError: + distance += big_distance + break + try: + distance += dataid_val.distance(val) + except AttributeError: + if not isinstance(val, list): + val = [val] + if dataid_val not in val: + distance = np.inf + break + elif isinstance(dataid_val, numbers.Number): + # so as to get the highest resolution first + # FIXME: this ought to be clarified, not sure that + # higher resolution is preferable is all cases. + # Moreover this might break with other numerical + # values. + distance += dataid_val + distances.append(distance) + distances, dataids = zip(*sorted(zip(distances, sorted_dataids))) + return dataids, distances + + +class DatasetID: + """Deprecated datasetid.""" + + def __init__(self, *args, **kwargs): + """Fake init.""" + raise TypeError("DatasetID should not be used directly") + + def from_dict(self, *args, **kwargs): + """Fake fun.""" + raise TypeError("DatasetID should not be used directly") + + +def create_filtered_query(dataset_key, filter_query): + """Create a DataQuery matching *dataset_key* and *filter_query*. + + If a property is specified in both *dataset_key* and *filter_query*, the former has priority. 
""" @@ -316,10 +782,14 @@ def create_filtered_dsid(dataset_key, **dfilter): ds_dict = {'name': dataset_key} elif isinstance(dataset_key, numbers.Number): ds_dict = {'wavelength': dataset_key} - for key, value in dfilter.items(): - if value is not None: - ds_dict.setdefault(key, value) - return DatasetID.from_dict(ds_dict) + else: + raise TypeError("Don't know how to interpret a dataset_key of type {}".format(type(dataset_key))) + if filter_query is not None: + for key, value in filter_query._dict.items(): + if value != '*': + ds_dict.setdefault(key, value) + + return DataQuery.from_dict(ds_dict) def dataset_walker(datasets): @@ -341,14 +811,9 @@ def replace_anc(dataset, parent_dataset): """Replace *dataset* the *parent_dataset*'s `ancillary_variables` field.""" if parent_dataset is None: return - current_dsid = DatasetID.from_dict(dataset.attrs) + id_keys = parent_dataset.attrs.get('_satpy_id_keys', dataset.attrs.get('_satpy_id_keys')) + current_dataid = DataID(id_keys, **dataset.attrs) for idx, ds in enumerate(parent_dataset.attrs['ancillary_variables']): - if current_dsid == DatasetID.from_dict(ds.attrs): + if current_dataid == DataID(id_keys, **ds.attrs): parent_dataset.attrs['ancillary_variables'][idx] = dataset return - - -class Dataset(object): - """Placeholder for the deprecated class.""" - - pass diff --git a/satpy/etc/composites/sar.yaml b/satpy/etc/composites/sar.yaml index 73ae04d2d2..c4de29ec96 100644 --- a/satpy/etc/composites/sar.yaml +++ b/satpy/etc/composites/sar.yaml @@ -9,7 +9,7 @@ composites: calibration: gamma - name: measurement polarization: hv - calibraion: gamma + calibration: gamma standard_name: sar-ice sar-ice-iw: diff --git a/satpy/etc/composites/slstr.yaml b/satpy/etc/composites/slstr.yaml index 8051695050..d4a7e0c2a8 100644 --- a/satpy/etc/composites/slstr.yaml +++ b/satpy/etc/composites/slstr.yaml @@ -1,65 +1,58 @@ sensor_name: visir/slstr +composite_identification_keys: + name: + required: true + resolution: + transitive: true + view: + enum: + - nadir + - oblique + stripe: + enum: + - a + - b + - i + +modifiers: + nir_reflectance: + compositor: !!python/name:satpy.composites.NIRReflectance + prerequisites: + - S8 + optional_prerequisites: + - solar_zenith_angle + - 13.4 + sunz_threshold: 85.0 composites: overview: compositor: !!python/name:satpy.composites.GenericCompositor prerequisites: - - name: S2_an + - name: S2 modifiers: [sunz_corrected] - - name: S3_an + - name: S3 modifiers: [sunz_corrected] - - S8_in + - S8 standard_name: overview - overview_oblique: - compositor: !!python/name:satpy.composites.GenericCompositor - prerequisites: - - name: S2_ao - modifiers: [sunz_corrected] - - name: S3_ao - modifiers: [sunz_corrected] - - S8_io - standard_name: overview_oblique - natural_color: compositor: !!python/name:satpy.composites.GenericCompositor prerequisites: - - name: S5_an + - name: S5 modifiers: [sunz_corrected] - - name: S3_an + - name: S3 modifiers: [sunz_corrected] - - name: S2_an + - name: S2 modifiers: [sunz_corrected] standard_name: natural_color - natural_color_oblique: - compositor: !!python/name:satpy.composites.GenericCompositor - prerequisites: - - name: S5_ao - modifiers: [sunz_corrected] - - name: S3_ao - modifiers: [sunz_corrected] - - name: S2_ao - modifiers: [sunz_corrected] - standard_name: natural_color_oblique - day_microphysics: compositor: !!python/name:satpy.composites.GenericCompositor prerequisites: - - name: S3_an + - name: S3 modifiers: [sunz_corrected] - - name: S7_in + - name: S7 modifiers: [nir_reflectance] - - S8_in + - 
S8 standard_name: day_microphysics - - day_microphysics_oblique: - compositor: !!python/name:satpy.composites.GenericCompositor - prerequisites: - - name: S3_ao - modifiers: [sunz_corrected] - - name: S7_io - modifiers: [nir_reflectance] - - S8_io - standard_name: day_microphysics_oblique diff --git a/satpy/etc/composites/visir.yaml b/satpy/etc/composites/visir.yaml index 2a4bbf994c..37bd41a853 100644 --- a/satpy/etc/composites/visir.yaml +++ b/satpy/etc/composites/visir.yaml @@ -1,4 +1,9 @@ sensor_name: visir +composite_identification_keys: + name: + required: true + resolution: + transitive: true modifiers: sunz_corrected: diff --git a/satpy/etc/readers/grib.yaml b/satpy/etc/readers/grib.yaml index 834612a326..253e6cfb1c 100644 --- a/satpy/etc/readers/grib.yaml +++ b/satpy/etc/readers/grib.yaml @@ -3,6 +3,14 @@ reader: name: grib reader: !!python/name:satpy.readers.yaml_reader.FileYAMLReader sensors: [unknown] + data_identification_keys: + name: + required: true + level: + resolution: + modifiers: + default: [] + type: !!python/name:satpy.dataset.ModifierTuple file_types: grib: diff --git a/satpy/etc/readers/nucaps.yaml b/satpy/etc/readers/nucaps.yaml index 4b697c7802..9ec1050934 100644 --- a/satpy/etc/readers/nucaps.yaml +++ b/satpy/etc/readers/nucaps.yaml @@ -3,6 +3,14 @@ reader: name: nucaps reader: !!python/name:satpy.readers.nucaps.NUCAPSReader sensors: [cris, atms] + data_identification_keys: + name: + required: true + level: + resolution: + modifiers: + default: [] + type: !!python/name:satpy.dataset.ModifierTuple file_types: nucaps: diff --git a/satpy/etc/readers/sar-c_safe.yaml b/satpy/etc/readers/sar-c_safe.yaml index e7d1ccaa09..8d1b243d52 100644 --- a/satpy/etc/readers/sar-c_safe.yaml +++ b/satpy/etc/readers/sar-c_safe.yaml @@ -4,6 +4,27 @@ reader: sensors: [sar-c] default_channels: [] reader: !!python/name:satpy.readers.yaml_reader.FileYAMLReader + data_identification_keys: + name: + required: true + polarization: + resolution: + transitive: true + calibration: + enum: + - gamma + - sigma_nought + - beta_nought + modifiers: + default: [] + type: !!python/name:satpy.dataset.ModifierTuple + + coord_identification_keys: + name: + required: true + polarization: + resolution: + transitive: true file_types: safe_measurement: diff --git a/satpy/etc/readers/slstr_l1b.yaml b/satpy/etc/readers/slstr_l1b.yaml index 7acb690316..ad0951d990 100644 --- a/satpy/etc/readers/slstr_l1b.yaml +++ b/satpy/etc/readers/slstr_l1b.yaml @@ -5,198 +5,95 @@ reader: default_channels: [] reader: !!python/name:satpy.readers.yaml_reader.FileYAMLReader + data_identification_keys: + name: + required: true + wavelength: + type: !!python/name:satpy.dataset.WavelengthRange + resolution: + transitive: true + calibration: + enum: + - reflectance + - brightness_temperature + - radiance + - counts + view: + enum: + - nadir + - oblique + stripe: + enum: + - a + - b + - i + - f + modifiers: + default: [] + type: !!python/name:satpy.dataset.ModifierTuple + + coord_identification_keys: + name: + required: true + resolution: + transitive: true + view: + enum: + - nadir + - oblique + stripe: + enum: + - a + - b + - i + - f + file_types: - esa_l1b_an: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTR1B - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/{dataset_name}_radiance_an.nc'] - 
esa_l1b_ao: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTR1B - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/{dataset_name}_radiance_ao.nc'] - esa_l1b_bn: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTR1B - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/{dataset_name}_radiance_bn.nc'] - esa_l1b_bo: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTR1B - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/{dataset_name}_radiance_bo.nc'] - esa_l1b_cn: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTR1B - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/{dataset_name}_radiance_cn.nc'] - esa_l1b_co: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTR1B - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/{dataset_name}_radiance_co.nc'] - esa_l1b_ntir: + esa_l1b_refl: file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTR1B - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/{dataset_name}_BT_{stripe:1s}n.nc'] - esa_l1b_otir: + file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/{dataset_name}_radiance_{stripe:1s}{view:1s}.nc'] + esa_l1b_tir: file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTR1B - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/{dataset_name}_BT_{stripe:1s}o.nc'] + file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/{dataset_name}_BT_{stripe:1s}{view:1s}.nc'] esa_angles: file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTRAngles file_patterns: 
['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/geometry_t{view:1s}.nc'] - esa_geo_an: + esa_geo: file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTRGeo - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/geodetic_a{view:1s}.nc'] - esa_geo_bn: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTRGeo - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/geodetic_b{view:1s}.nc'] - esa_geo_in: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTRGeo - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/geodetic_i{view:1s}.nc'] - esa_l1b_flag_an: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTRFlag - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/flags_an.nc'] - esa_l1b_flag_bn: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTRFlag - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/flags_bn.nc'] - esa_l1b_flag_cn: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTRFlag - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/flags_cn.nc'] - esa_l1b_flag_in: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTRFlag - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/flags_in.nc'] - esa_l1b_flag_ao: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTRFlag - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/flags_ao.nc'] - esa_l1b_flag_bo: + file_patterns: 
['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/geodetic_{stripe:1s}{view:1s}.nc'] + esa_l1b_flag: file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTRFlag - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/flags_bo.nc'] - esa_l1b_flag_co: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTRFlag - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/flags_co.nc'] - esa_l1b_flag_io: - file_reader: !!python/name:satpy.readers.slstr_l1b.NCSLSTRFlag - file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/flags_io.nc'] - + file_patterns: ['{mission_id:3s}_SL_{processing_level:1s}_{datatype_id:_<6s}_{start_time:%Y%m%dT%H%M%S}_{end_time:%Y%m%dT%H%M%S}_{creation_time:%Y%m%dT%H%M%S}_{duration:4d}_{cycle:3d}_{relative_orbit:3d}_{frame:4s}_{centre:3s}_{mode:1s}_{timeliness:2s}_{collection:3s}.SEN3/flags_{stripe:1s}{view:1s}.nc'] datasets: - longitude_an: - name: longitude_an - resolution: 500 - view: nadir - stripe: a - file_type: esa_geo_an - file_key: longitude_an - standard_name: longitude - units: degree - - latitude_an: - name: latitude_an - resolution: 500 - view: nadir - stripe: a - file_type: esa_geo_an - file_key: latitude_an - standard_name: latitude - units: degree - - longitude_bn: - name: longitude_bn - resolution: 500 - view: nadir - stripe: b - file_type: esa_geo_bn - file_key: longitude_bn - standard_name: longitude - units: degree - - latitude_bn: - name: latitude_bn - resolution: 500 - view: nadir - stripe: b - file_type: esa_geo_bn - file_key: latitude_bn - standard_name: latitude - units: degree - - longitude_in: - name: longitude_in - resolution: 1000 - view: nadir - stripe: i - file_type: esa_geo_in - file_key: longitude_in - standard_name: longitude - units: degree - - latitude_in: - name: latitude_in - resolution: 1000 - view: nadir - stripe: i - file_type: esa_geo_in - standard_name: latitude - file_key: latitude_in - units: degree - - longitude_ao: - name: longitude_ao - resolution: 500 - view: oblique - stripe: a - file_type: esa_geo_an - file_key: longitude_ao - standard_name: longitude - units: degree - - latitude_ao: - name: latitude_ao - resolution: 500 - view: oblique - stripe: a - file_type: esa_geo_an - file_key: latitude_ao - standard_name: latitude - units: degree - - longitude_bo: - name: longitude_bo - resolution: 500 - view: oblique - stripe: b - file_type: esa_geo_bn - file_key: longitude_bo - standard_name: longitude - units: degree - - latitude_bo: - name: latitude_bo - resolution: 500 - view: oblique - stripe: b - file_type: esa_geo_bn - file_key: latitude_bo - standard_name: latitude - units: degree - - longitude_io: - name: longitude_io - resolution: 1000 - view: oblique - stripe: i 
- file_type: esa_geo_in - file_key: longitude_io + longitude: + name: longitude + resolution: [500, 1000] + view: [nadir, oblique] + stripe: [a, b, i, f] + file_type: esa_geo + file_key: longitude_{stripe:1s}{view:1s} standard_name: longitude units: degree - latitude_io: - name: latitude_io - resolution: 1000 - view: oblique - stripe: i - file_type: esa_geo_in + latitude: + name: latitude + resolution: [500, 1000] + view: [nadir, oblique] + stripe: [a, b, i, f] + file_type: esa_geo + file_key: latitude_{stripe:1s}{view:1s} standard_name: latitude - file_key: latitude_io units: degree # The channels S1-S3 are available in nadir (default) and oblique view. - S1_an: - name: S1_an + S1: + name: S1 sensor: slstr wavelength: [0.545,0.555,0.565] resolution: 500 - view: nadir - stripe: a + view: [nadir, oblique] + stripe: [a, b] calibration: reflectance: standard_name: toa_bidirectional_reflectance @@ -204,33 +101,16 @@ datasets: radiance: standard_name: toa_outgoing_radiance_per_unit_wavelength units: W m-2 um-1 sr-1 - coordinates: [longitude_an, latitude_an] - file_type: esa_l1b_an + coordinates: [longitude, latitude] + file_type: esa_l1b_refl - S1_ao: - name: S1_ao - sensor: slstr - wavelength: [0.545,0.555,0.565] - resolution: 500 - view: oblique - stripe: a - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_ao, latitude_ao] - file_type: esa_l1b_ao - - S2_an: - name: S2_an + S2: + name: S2 sensor: slstr wavelength: [0.649, 0.659, 0.669] resolution: 500 - view: nadir - stripe: a + view: [nadir, oblique] + stripe: [a, b] calibration: reflectance: standard_name: toa_bidirectional_reflectance @@ -238,48 +118,16 @@ datasets: radiance: standard_name: toa_outgoing_radiance_per_unit_wavelength units: W m-2 um-1 sr-1 - coordinates: [longitude_an, latitude_an] - file_type: esa_l1b_an + coordinates: [longitude, latitude] + file_type: esa_l1b_refl - S2_ao: - name: S2_ao - sensor: slstr - stripe: a - wavelength: [0.649, 0.659, 0.669] - resolution: 500 - view: oblique - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_ao, latitude_ao] - file_type: esa_l1b_ao - - S3_an: - name: S3_an - sensor: slstr - wavelength: [0.855, 0.865, 0.875] - resolution: 500 - view: nadir - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_an, latitude_an] - file_type: esa_l1b_an - - S3_ao: - name: S3_ao + S3: + name: S3 sensor: slstr wavelength: [0.855, 0.865, 0.875] resolution: 500 - view: oblique + view: [nadir, oblique] + stripe: [a, b] calibration: reflectance: standard_name: toa_bidirectional_reflectance @@ -287,225 +135,18 @@ datasets: radiance: standard_name: toa_outgoing_radiance_per_unit_wavelength units: W m-2 um-1 sr-1 - coordinates: [longitude_ao, latitude_ao] - file_type: esa_l1b_ao + coordinates: [longitude, latitude] + file_type: esa_l1b_refl # The channels S4-S6 are available in nadir (default) and oblique view and for both in the # a,b and c stripes. 
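With the per-stripe/per-view datasets collapsed into single entries above, the stripe and view are no longer encoded in the dataset name but become identification keys of the resulting `DataID`. A minimal sketch of how such a channel could then be requested, assuming the reader declares `view` and `stripe` in its `data_identification_keys` and with `my_slstr_files` as a placeholder for the actual `.SEN3` file list::

    from satpy import Scene
    from satpy.dataset import DataQuery

    scn = Scene(reader='slstr_l1b', filenames=my_slstr_files)  # my_slstr_files: hypothetical file list
    # Pin down one (view, stripe) combination explicitly instead of asking for e.g. 'S1_ao'
    scn.load([DataQuery(name='S1', view='oblique', stripe='a', calibration='reflectance')])
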
- S4_an: - name: S4_an - sensor: slstr - wavelength: [1.3675, 1.375, 1.36825] - resolution: 500 - view: nadir - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_an, latitude_an] - file_type: esa_l1b_an - - S4_ao: - name: S4_ao - sensor: slstr - wavelength: [1.3675, 1.375, 1.36825] - resolution: 500 - view: oblique - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_ao, latitude_ao] - file_type: esa_l1b_ao - - S5_an: - name: S5_an - sensor: slstr - wavelength: [1.58, 1.61, 1.64] - resolution: 500 - view: nadir - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_an, latitude_an] - file_type: esa_l1b_an - - S5_ao: - name: S5_ao - sensor: slstr - wavelength: [1.58, 1.61, 1.64] - resolution: 500 - view: oblique - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_ao, latitude_ao] - file_type: esa_l1b_ao - - S6_an: - name: S6_an - sensor: slstr - wavelength: [2.225, 2.25, 2.275] - resolution: 500 - view: nadir - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_an, latitude_an] - file_type: esa_l1b_an - - S6_ao: - name: S6_ao - sensor: slstr - wavelength: [2.225, 2.25, 2.275] - resolution: 500 - view: oblique - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_ao, latitude_ao] - file_type: esa_l1b_ao - - S4_bn: - name: S4_bn - sensor: slstr - wavelength: [1.3675, 1.375, 1.36825] - resolution: 500 - view: nadir - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_bn, latitude_bn] - file_type: esa_l1b_bn - - S4_bo: - name: S4_bo - sensor: slstr - wavelength: [1.3675, 1.375, 1.36825] - resolution: 500 - view: oblique - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_bo, latitude_bo] - file_type: esa_l1b_bo - - S5_bn: - name: S5_bn - sensor: slstr - wavelength: [1.58, 1.61, 1.64] - resolution: 500 - view: nadir - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_bn, latitude_bn] - file_type: esa_l1b_bn - - S5_bo: - name: S5_bo - sensor: slstr - wavelength: [1.58, 1.61, 1.64] - resolution: 500 - view: oblique - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 
sr-1 - coordinates: [longitude_bo, latitude_bo] - file_type: esa_l1b_bo - - S6_bn: - name: S6_bn - sensor: slstr - wavelength: [2.225, 2.25, 2.275] - resolution: 500 - view: nadir - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_bn, latitude_bn] - file_type: esa_l1b_bn - - S6_bo: - name: S6_bo - sensor: slstr - wavelength: [2.225, 2.25, 2.275] - resolution: 500 - view: oblique - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_bo, latitude_bo] - file_type: esa_l1b_bo - - S4_cn: - name: S4_cn - sensor: slstr - wavelength: [1.3675, 1.375, 1.36825] - resolution: 500 - view: nadir - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_cn, latitude_cn] - file_type: esa_l1b_cn - - S4_co: - name: S4_co + S4: + name: S4 sensor: slstr wavelength: [1.3675, 1.375, 1.36825] resolution: 500 - view: oblique + view: [nadir, oblique] + stripe: [a, b] calibration: reflectance: standard_name: toa_bidirectional_reflectance @@ -513,15 +154,16 @@ datasets: radiance: standard_name: toa_outgoing_radiance_per_unit_wavelength units: W m-2 um-1 sr-1 - coordinates: [longitude_co, latitude_co] - file_type: esa_l1b_co + coordinates: [longitude, latitude] + file_type: esa_l1b_refl - S5_cn: - name: S5_cn + S5: + name: S5 sensor: slstr wavelength: [1.58, 1.61, 1.64] resolution: 500 - view: nadir + view: [nadir, oblique] + stripe: [a, b] calibration: reflectance: standard_name: toa_bidirectional_reflectance @@ -529,47 +171,16 @@ datasets: radiance: standard_name: toa_outgoing_radiance_per_unit_wavelength units: W m-2 um-1 sr-1 - coordinates: [longitude_cn, latitude_cn] - file_type: esa_l1b_cn + coordinates: [longitude, latitude] + file_type: esa_l1b_refl - S5_co: - name: S5_co - sensor: slstr - wavelength: [1.58, 1.61, 1.64] - resolution: 500 - view: oblique - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_co, latitude_co] - file_type: esa_l1b_co - - S6_cn: - name: S6_cn - sensor: slstr - wavelength: [2.225, 2.25, 2.275] - resolution: 500 - view: nadir - calibration: - reflectance: - standard_name: toa_bidirectional_reflectance - units: "%" - radiance: - standard_name: toa_outgoing_radiance_per_unit_wavelength - units: W m-2 um-1 sr-1 - coordinates: [longitude_cn, latitude_cn] - file_type: esa_l1b_cn - - S6_co: - name: S6_co + S6: + name: S6 sensor: slstr wavelength: [2.225, 2.25, 2.275] resolution: 500 - view: oblique + view: [nadir, oblique] + stripe: [a, b] calibration: reflectance: standard_name: toa_bidirectional_reflectance @@ -577,444 +188,160 @@ datasets: radiance: standard_name: toa_outgoing_radiance_per_unit_wavelength units: W m-2 um-1 sr-1 - coordinates: [longitude_co, latitude_co] - file_type: esa_l1b_co + coordinates: [longitude, latitude] + file_type: esa_l1b_refl # The channels S7-S9, F1 and F2 are available in nadir (default) and oblique view. 
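Since every allowed value of `view` and `stripe` still produces its own `DataID`, the available combinations remain individually addressable. A small sketch, continuing the session above and again assuming `view` and `stripe` are identification keys for this reader::

    s5_ids = [ds_id for ds_id in scn.available_dataset_ids() if ds_id['name'] == 'S5']
    for ds_id in s5_ids:
        # one DataID per (view, stripe, calibration) combination
        print(ds_id['view'], ds_id['stripe'], ds_id.get('calibration'))
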
- S7_in: - name: S7_in - sensor: slstr - wavelength: [3.55, 3.74, 3.93] - resolution: 1000 - view: nadir - calibration: - brightness_temperature: - standard_name: toa_brightness_temperature - units: "K" - coordinates: [longitude_in, latitude_in] - file_type: esa_l1b_ntir - - S7_io: - name: S7_io + S7: + name: S7 sensor: slstr wavelength: [3.55, 3.74, 3.93] resolution: 1000 - view: oblique - calibration: - brightness_temperature: - standard_name: toa_brightness_temperature - units: "K" - coordinates: [longitude_io, latitude_io] - file_type: esa_l1b_otir - - S8_in: - name: S8_in - sensor: slstr - wavelength: [10.4, 10.85, 11.3] - resolution: 1000 - view: nadir + view: [nadir, oblique] + stripe: i calibration: brightness_temperature: standard_name: toa_brightness_temperature units: "K" - coordinates: [longitude_in, latitude_in] - file_type: esa_l1b_ntir + coordinates: [longitude, latitude] + file_type: esa_l1b_tir - S8_io: - name: S8_io + S8: + name: S8 sensor: slstr wavelength: [10.4, 10.85, 11.3] resolution: 1000 - view: oblique - calibration: - brightness_temperature: - standard_name: toa_brightness_temperature - units: "K" - coordinates: [longitude_io, latitude_io] - file_type: esa_l1b_otir - - S9_in: - name: S9_in - sensor: slstr - wavelength: [11.0, 12.0, 13.0] - resolution: 1000 - view: nadir + view: [nadir, oblique] + stripe: i calibration: brightness_temperature: standard_name: toa_brightness_temperature units: "K" - coordinates: [longitude_in, latitude_in] - file_type: esa_l1b_ntir + coordinates: [longitude, latitude] + file_type: esa_l1b_tir - S9_io: - name: S9_io + S9: + name: S9_ sensor: slstr wavelength: [11.0, 12.0, 13.0] resolution: 1000 - view: oblique - calibration: - brightness_temperature: - standard_name: toa_brightness_temperature - units: "K" - coordinates: [longitude_io, latitude_io] - file_type: esa_l1b_otir - - F1_in: - name: F1_in - sensor: slstr - wavelength: [3.55, 3.74, 3.93] - resolution: 1000 - view: nadir + view: [nadir, oblique] + stripe: i calibration: brightness_temperature: standard_name: toa_brightness_temperature units: "K" - coordinates: [longitude_in, latitude_in] - file_type: esa_l1b_ntir + coordinates: [longitude, latitude] + file_type: esa_l1b_tir - F1_io: - name: F1_io + F1: + name: F1 sensor: slstr wavelength: [3.55, 3.74, 3.93] resolution: 1000 - view: oblique + view: [nadir, oblique] + stripe: f calibration: brightness_temperature: standard_name: toa_brightness_temperature units: "K" - coordinates: [longitude_io, latitude_io] - file_type: esa_l1b_otir + coordinates: [longitude, latitude] + file_type: esa_l1b_tir - F2_in: - name: F2_in + F2: + name: F2 sensor: slstr wavelength: [10.4, 10.85, 11.3] resolution: 1000 - view: nadir - calibration: - brightness_temperature: - standard_name: toa_brightness_temperature - units: "K" - coordinates: [longitude_in, latitude_in] - file_type: esa_l1b_ntir - - F2_io: - name: F2_io - sensor: slstr - wavelength: [10.4, 10.85, 11.3] - resolution: 1000 - view: oblique + view: [nadir, oblique] + stripe: i calibration: brightness_temperature: standard_name: toa_brightness_temperature units: "K" - coordinates: [longitude_io, latitude_io] - file_type: esa_l1b_otir + coordinates: [longitude, latitude] + file_type: esa_l1b_tir - solar_zenith_angle_n: - name: solar_zenith_angle_n + solar_zenith_angle: + name: solar_zenith_angle sensor: slstr - resolution: 1000 - coordinates: [longitude_in, latitude_in] - view: nadir + resolution: [500, 1000] + coordinates: [longitude, latitude] + view: [nadir, oblique] standard_name: 
solar_zenith_angle file_type: esa_angles - file_key: solar_zenith_tn + file_key: solar_zenith_t{view:1s} - solar_azimuth_angle_n: - name: solar_azimuth_angle_n + solar_azimuth_angle: + name: solar_azimuth_angle sensor: slstr - resolution: 1000 - coordinates: [longitude_in, latitude_in] - view: nadir + resolution: [500, 1000] + coordinates: [longitude, latitude] + view: [nadir, oblique] standard_name: solar_azimuth_angle file_type: esa_angles - file_key: solar_azimuth_tn + file_key: solar_azimuth_t{view:1s} - satellite_zenith_angle_n: - name: satellite_zenith_angle_n + satellite_zenith_angle: + name: satellite_zenith_angle sensor: slstr - resolution: 1000 - coordinates: [longitude_in, latitude_in] - view: nadir + resolution: [500, 1000] + coordinates: [longitude, latitude] + view: [nadir, oblique] standard_name: satellite_zenith_angle file_type: esa_angles - file_key: sat_zenith_tn + file_key: sat_zenith_t{view:1s} - satellite_azimuth_angle_n: + satellite_azimuth_angle: name: satellite_azimuth_angle_n sensor: slstr - resolution: 1000 - coordinates: [longitude_in, latitude_in] - view: nadir - standard_name: satellite_azimuth_angle - file_type: esa_angles - file_key: sat_azimuth_tn - - solar_zenith_angle_o: - name: solar_zenith_angle_o - sensor: slstr - resolution: 1000 - coordinates: [longitude_io, latitude_io] - view: oblique - standard_name: solar_zenith_angle - file_type: esa_angles - file_key: solar_zenith_to - - solar_azimuth_angle_o: - name: solar_azimuth_angle_o - sensor: slstr - resolution: 1000 - coordinates: [longitude_io, latitude_io] - view: oblique - standard_name: solar_azimuth_angle - file_type: esa_angles - file_key: solar_azimuth_to - - satellite_zenith_angle_o: - name: satellite_zenith_angle_o - sensor: slstr - resolution: 1000 - coordinates: [longitude_io, latitude_io] - view: oblique - standard_name: satellite_zenith_angle - file_type: esa_angles - file_key: sat_zenith_to - - satellite_azimuth_angle_o: - name: satellite_azimuth_angle_o - sensor: slstr - resolution: 1000 - coordinates: [longitude_io, latitude_io] - view: oblique + resolution: [500, 1000] + coordinates: [longitude, latitude] + view: [nadir, oblique] standard_name: satellite_azimuth_angle file_type: esa_angles - file_key: sat_azimuth_to + file_key: sat_azimuth_t{view:1s} # CloudFlags are all bitfields. They are available in nadir (default) and oblique view for # each of the a,b,c,i stripes. 
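The angle datasets above, and the cloud/confidence/pointing/bayes flags defined just below, follow the same pattern: one entry per quantity, with the view (and stripe, where relevant) resolved through the `file_key` template. Continuing the same hypothetical session::

    scn.load([DataQuery(name='solar_zenith_angle', view='oblique'),
              DataQuery(name='cloud', view='nadir', stripe='i')])
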
- cloud_an: - name: cloud_an - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_an - coordinates: [longitude_an, latitude_an] - - confidence_an: - name: confidence_an - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_an - coordinates: [longitude_an, latitude_an] - - pointing_an: - name: pointing_an - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_an - coordinates: [longitude_an, latitude_an] - - bayes_an: - name: bayes_an - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_an - coordinates: [longitude_an, latitude_an] - - cloud_bn: - name: cloud_bn - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_bn - coordinates: [longitude_bn, latitude_bn] - - confidence_bn: - name: confidence_bn - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_bn - coordinates: [longitude_bn, latitude_bn] - - pointing_bn: - name: pointing_bn - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_bn - coordinates: [longitude_bn, latitude_bn] - - bayes_bn: - name: bayes_bn - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_bn - coordinates: [longitude_bn, latitude_bn] - - cloud_cn: - name: cloud_cn - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_cn - coordinates: [longitude_cn, latitude_cn] - - confidence_cn: - name: confidence_cn - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_cn - coordinates: [longitude_cn, latitude_cn] - - pointing_cn: - name: pointing_cn - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_cn - coordinates: [longitude_cn, latitude_cn] - - bayes_cn: - name: bayes_cn - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_cn - coordinates: [longitude_cn, latitude_cn] - - cloud_in: - name: cloud_in - sensor: slstr - resolution: 1000 - file_type: esa_l1b_flag_in - coordinates: [longitude_in, latitude_in] - - confidence_in: - name: confidence_in - sensor: slstr - resolution: 1000 - file_type: esa_l1b_flag_in - coordinates: [longitude_in, latitude_in] - - pointing_in: - name: pointing_in - sensor: slstr - resolution: 1000 - file_type: esa_l1b_flag_in - coordinates: [longitude_in, latitude_in] - - bayes_in: - name: bayes_in - sensor: slstr - resolution: 1000 - file_type: esa_l1b_flag_in - coordinates: [longitude_in, latitude_in] - -# CloudFlags are all bitfields. 
Now for the oblique view - cloud_ao: - name: cloud_ao - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_ao - coordinates: [longitude_ao, latitude_ao] - - confidence_ao: - name: confidence_ao - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_ao - coordinates: [longitude_ao, latitude_ao] - - pointing_ao: - name: pointing_ao - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_ao - coordinates: [longitude_ao, latitude_ao] - - bayes_ao: - name: bayes_ao - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_ao - coordinates: [longitude_ao, latitude_ao] - - cloud_bo: - name: cloud_bo - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_bo - coordinates: [longitude_bo, latitude_bo] - - confidence_bo: - name: confidence_bo - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_bo - coordinates: [longitude_bo, latitude_bo] - - pointing_bo: - name: pointing_bo - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_bo - coordinates: [longitude_bo, latitude_bo] - - bayes_bo: - name: bayes_bo - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_bo - coordinates: [longitude_bo, latitude_bo] - - cloud_co: - name: cloud_co - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_co - coordinates: [longitude_co, latitude_co] - - confidence_co: - name: confidence_co - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_co - coordinates: [longitude_co, latitude_co] - - pointing_co: - name: pointing_co - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_co - coordinates: [longitude_co, latitude_co] - - bayes_co: - name: bayes_co - sensor: slstr - resolution: 500 - file_type: esa_l1b_flag_co - coordinates: [longitude_co, latitude_co] - - cloud_io: - name: cloud_io - sensor: slstr - resolution: 1000 - file_type: esa_l1b_flag_io - coordinates: [longitude_io, latitude_io] - - confidence_io: - name: confidence_io - sensor: slstr - resolution: 1000 - file_type: esa_l1b_flag_io - coordinates: [longitude_io, latitude_io] - - pointing_io: - name: pointing_io - sensor: slstr - resolution: 1000 - file_type: esa_l1b_flag_io - coordinates: [longitude_io, latitude_io] - - bayes_io: - name: bayes_io - sensor: slstr - resolution: 1000 - file_type: esa_l1b_flag_io - coordinates: [longitude_io, latitude_io] + cloud: + name: cloud + sensor: slstr + resolution: [500, 1000] + coordinates: [longitude, latitude] + view: [nadir, oblique] + stripe: [a, b, i, f] + file_type: esa_l1b_flag + file_key: cloud_{stripe:1s}{view:1s} + + confidence: + name: confidence + sensor: slstr + resolution: [500, 1000] + coordinates: [longitude, latitude] + view: [nadir, oblique] + stripe: [a, b, i, f] + file_type: esa_l1b_flag + file_key: confidence_{stripe:1s}{view:1s} + + + pointing: + name: pointing + sensor: slstr + resolution: [500, 1000] + coordinates: [longitude, latitude] + view: [nadir, oblique] + stripe: [a, b, i, f] + file_type: esa_l1b_flag + file_key: pointing_{stripe:1s}{view:1s} + + bayes: + name: bayes + sensor: slstr + resolution: [500, 1000] + coordinates: [longitude, latitude] + view: [nadir, oblique] + stripe: [a, b, i, f] + file_type: esa_l1b_flag + file_key: bayes_{stripe:1s}{view:1s} diff --git a/satpy/multiscene.py b/satpy/multiscene.py index eca34b6fbf..73b23662ba 100644 --- a/satpy/multiscene.py +++ b/satpy/multiscene.py @@ -17,23 +17,19 @@ # satpy. If not, see . 
"""MultiScene object to work with multiple timesteps of satellite data.""" -import logging import copy -import numpy as np +import logging +from queue import Queue +from threading import Thread + import dask.array as da -import xarray as xr +import numpy as np import pandas as pd +import xarray as xr + +from satpy.dataset import DataID, combine_metadata from satpy.scene import Scene from satpy.writers import get_enhanced_image -from satpy.dataset import combine_metadata, DatasetID -from threading import Thread - -try: - # python 3 - from queue import Queue -except ImportError: - # python 2 - from Queue import Queue try: import imageio @@ -79,7 +75,7 @@ def add_group_aliases(scenes, groups): for group_id, member_names in groups.items(): # Find out whether one of the datasets in this scene belongs # to this group - member_ids = [DatasetID.from_dict(scene[name].attrs) + member_ids = [scene[name].attrs['_satpy_id'] for name in member_names if name in scene] # Add an alias for the group it belongs to @@ -315,10 +311,10 @@ def group(self, groups): by `MultiScene`. Even if their dataset IDs differ (for example because the names or wavelengths are slightly different). Groups can be specified as a dictionary `{group_id: dataset_names}` where the keys - must be of type `DatasetID`, for example:: + must be of type `DataQuery`, for example:: groups={ - DatasetID('my_group', wavelength=(10, 11, 12)): ['IR_108', 'B13', 'C13'] + DataQuery('my_group', wavelength=(10, 11, 12)): ['IR_108', 'B13', 'C13'] } """ self._scenes = add_group_aliases(self._scenes, groups) @@ -429,7 +425,6 @@ def _format_decoration(ds, decorate): If the nested dictionary in decorate (argument to ``save_animation``) contains a text to be added, format those based on dataset parameters. """ - if decorate is None or "decorate" not in decorate: return decorate deco_local = copy.deepcopy(decorate) @@ -552,7 +547,7 @@ def _get_writers_and_frames( info_scenes.append(scenes[-1]) available_ds = [first_scene.datasets.get(ds) for ds in first_scene.wishlist] - available_ds = [DatasetID.from_dict(ds.attrs) for ds in available_ds if ds is not None] + available_ds = [DataID.from_dataarray(ds) for ds in available_ds if ds is not None] dataset_ids = datasets or available_ds if not dataset_ids: @@ -603,7 +598,7 @@ def save_animation(self, filename, datasets=None, fps=10, fill_value=None, filename (str): Filename to save to. Can include python string formatting keys from dataset ``.attrs`` (ex. "{name}_{start_time:%Y%m%d_%H%M%S.gif") - datasets (list): DatasetIDs to save (default: all datasets) + datasets (list): DataIDs to save (default: all datasets) fps (int): Frames per second for produced animation fill_value (int): Value to use instead creating an alpha band. batch_size (int): Number of frames to compute at the same time. diff --git a/satpy/node.py b/satpy/node.py index 866be1341f..46b34756a3 100644 --- a/satpy/node.py +++ b/satpy/node.py @@ -17,10 +17,11 @@ # satpy. If not, see . 
"""Nodes to build trees.""" -from satpy import DatasetDict, DatasetID, DATASET_KEYS +from satpy import DatasetDict +from satpy.dataset import DataID, DataQuery, ModifierTuple from satpy.readers import TooManyResults from satpy.utils import get_logger -from satpy.dataset import create_filtered_dsid +from satpy.dataset import create_filtered_query LOG = get_logger(__name__) # Empty leaf used for marking composites with no prerequisites @@ -266,11 +267,11 @@ def __getitem__(self, item): return self._all_nodes[item] def contains(self, item): - """Check contains when we know the *exact* DatasetID.""" + """Check contains when we know the *exact* DataID or DataQuery.""" return super(DatasetDict, self._all_nodes).__contains__(item) def getitem(self, item): - """Get Node when we know the *exact* DatasetID.""" + """Get Node when we know the *exact* DataID or DataQuery.""" return super(DatasetDict, self._all_nodes).__getitem__(item) def get_compositor(self, key): @@ -280,8 +281,7 @@ def get_compositor(self, key): return self.compositors[sensor_name][key] except KeyError: continue - - if isinstance(key, DatasetID) and key.modifiers: + if isinstance(key, (DataQuery, DataID)) and key.get('modifiers'): # we must be generating a modifier composite return self.get_modifier(key) @@ -289,8 +289,8 @@ def get_compositor(self, key): def get_modifier(self, comp_id): """Get a modifer.""" - # create a DatasetID for the compositor we are generating - modifier = comp_id.modifiers[-1] + # create a DataID for the compositor we are generating + modifier = comp_id['modifiers'][-1] for sensor_name in self.modifiers.keys(): modifiers = self.modifiers[sensor_name] compositors = self.compositors[sensor_name] @@ -301,30 +301,25 @@ def get_modifier(self, comp_id): moptions = moptions.copy() moptions.update(comp_id.to_dict()) moptions['sensor'] = sensor_name - compositors[comp_id] = mloader(**moptions) + compositors[comp_id] = mloader(_satpy_id=comp_id, **moptions) return compositors[comp_id] raise KeyError("Could not find modifier '{}'".format(modifier)) - def _find_reader_dataset(self, dataset_key, **dfilter): - """Attempt to find a `DatasetID` in the available readers. + def _find_reader_dataset(self, dataset_key): + """Attempt to find a `DataID` in the available readers. Args: - dataset_key (str, float, DatasetID): - Dataset name, wavelength, or a combination of `DatasetID` - parameters to use in searching for the dataset from the + dataset_key (str, float, DataID, DataQuery): + Dataset name, wavelength, `DataID` or `DataQuery` + to use in searching for the dataset from the available readers. - **dfilter (list or str): `DatasetID` parameters besides `name` - and `wavelength` to use to filter the - available datasets. Passed directly to - `get_dataset_key` of the readers, see - that method for more information. 
""" too_many = False for reader_name, reader_instance in self.readers.items(): try: - ds_id = reader_instance.get_dataset_key(dataset_key, available_only=self._available_only, **dfilter) + ds_id = reader_instance.get_dataset_key(dataset_key, available_only=self._available_only) except TooManyResults: LOG.trace("Too many datasets matching key {} in reader {}".format(dataset_key, reader_name)) too_many = True @@ -334,7 +329,7 @@ def _find_reader_dataset(self, dataset_key, **dfilter): continue LOG.trace("Found {} in reader {} when asking for {}".format(str(ds_id), reader_name, repr(dataset_key))) try: - # now that we know we have the exact DatasetID see if we have already created a Node for it + # now that we know we have the exact DataID see if we have already created a Node for it return self.getitem(ds_id) except KeyError: # we haven't created a node yet, create it now @@ -342,14 +337,14 @@ def _find_reader_dataset(self, dataset_key, **dfilter): if too_many: raise TooManyResults("Too many keys matching: {}".format(dataset_key)) - def _get_compositor_prereqs(self, parent, prereq_names, skip=False, - **dfilter): + def _get_compositor_prereqs(self, parent, prereqs, skip=False, + query=None): """Determine prerequisite Nodes for a composite. Args: parent (Node): Compositor node to add these prerequisites under - prereq_names (sequence): Strings (names), floats (wavelengths), or - DatasetIDs to analyze. + prereqs (sequence): Strings (names), floats (wavelengths), or + DataQuerys to analyze. skip (bool, optional): If True, prerequisites are considered optional if they can't be found and a debug message is logged. If False (default), @@ -360,12 +355,11 @@ def _get_compositor_prereqs(self, parent, prereq_names, skip=False, """ prereq_ids = [] unknowns = set() - if not prereq_names and not skip: + if not prereqs and not skip: # this composite has no required prerequisites - prereq_names = [None] - - for prereq in prereq_names: - n, u = self._find_dependencies(prereq, **dfilter) + prereqs = [None] + for prereq in prereqs: + n, u = self._find_dependencies(prereq, query=query) if u: unknowns.update(u) if skip: @@ -377,8 +371,8 @@ def _get_compositor_prereqs(self, parent, prereq_names, skip=False, self.add_child(parent, n) return prereq_ids, unknowns - def _update_modifier_key(self, orig_key, dep_key): - """Update a key based on the dataset it will modified (dep). + def _update_modifier_id(self, query, dep_key): + """Promote a query to an id based on the dataset it will modify (dep). Typical use case is requesting a modified dataset (orig_key). This modified dataset most likely depends on a less-modified @@ -391,80 +385,85 @@ def _update_modifier_key(self, orig_key, dep_key): chance of Node's not being unique. 
""" - orig_dict = orig_key._asdict() + orig_dict = query._asdict() dep_dict = dep_key._asdict() - # don't change the modifiers - for k in DATASET_KEYS[:-1]: - orig_dict[k] = dep_dict[k] - return DatasetID.from_dict(orig_dict) + for k, dep_val in dep_dict.items(): + # don't change the modifiers, just cast them to the right class + if isinstance(dep_val, ModifierTuple): + orig_dict[k] = dep_val.__class__(orig_dict[k]) + else: + orig_dict[k] = dep_val + return dep_key.from_dict(orig_dict) - def _find_compositor(self, dataset_key, **dfilter): + def _find_compositor(self, dataset_key): """Find the compositor object for the given dataset_key.""" # NOTE: This function can not find a modifier that performs # one or more modifications if it has modifiers see if we can find # the unmodified version first src_node = None - if isinstance(dataset_key, DatasetID) and dataset_key.modifiers: - new_prereq = DatasetID( - *dataset_key[:-1] + (dataset_key.modifiers[:-1],)) - src_node, u = self._find_dependencies(new_prereq, **dfilter) - # Update the requested DatasetID with information from the src + if isinstance(dataset_key, DataQuery) and dataset_key.get('modifiers'): + new_dict = dataset_key.to_dict() + new_dict['modifiers'] = tuple(new_dict['modifiers'][:-1]) + new_prereq = DataQuery.from_dict(new_dict) + src_node, u = self._find_dependencies(new_prereq) + # Update the requested DatasetQuery with information from the src if src_node is not None: - dataset_key = self._update_modifier_key(dataset_key, - src_node.name) + dataset_key = self._update_modifier_id(dataset_key, + src_node.name) if u: return None, u - + elif isinstance(dataset_key, str): + dataset_key = DataQuery(name=dataset_key) try: compositor = self.get_compositor(dataset_key) except KeyError: raise KeyError("Can't find anything called {}".format(str(dataset_key))) - dataset_key = create_filtered_dsid(compositor.id, **dfilter) - root = Node(dataset_key, data=(compositor, [], [])) + + cid = compositor.id + root = Node(cid, data=(compositor, [], [])) if src_node is not None: self.add_child(root, src_node) root.data[1].append(src_node) + query = cid.create_dep_filter(dataset_key) # 2.1 get the prerequisites LOG.trace("Looking for composite prerequisites for: {}".format(dataset_key)) - prereqs, unknowns = self._get_compositor_prereqs(root, compositor.attrs['prerequisites'], **dfilter) + prereqs, unknowns = self._get_compositor_prereqs(root, compositor.attrs['prerequisites'], query=query) if unknowns: - # Should we remove all of the unknown nodes that were found + # Should we remove all of the unknown nodes that were found ? # if there is an unknown prerequisite are we in trouble? return None, unknowns root.data[1].extend(prereqs) + # Get the optionals LOG.trace("Looking for optional prerequisites for: {}".format(dataset_key)) optional_prereqs, _ = self._get_compositor_prereqs( - root, compositor.attrs['optional_prerequisites'], skip=True, **dfilter) + root, compositor.attrs['optional_prerequisites'], skip=True, query=query) root.data[2].extend(optional_prereqs) return root, set() - def get_filtered_item(self, dataset_key, **dfilter): - """Get the item matching *dataset_key* and *dfilter*.""" - dsid = create_filtered_dsid(dataset_key, **dfilter) - return self[dsid] - - def _find_dependencies(self, dataset_key, **dfilter): + def _find_dependencies(self, dataset_key, query=None): """Find the dependencies for *dataset_key*. Args: - dataset_key (str, float, DatasetID): Dataset identifier to locate - and find any additional - dependencies for. 
- **dfilter (dict): Additional filter parameters. See - `satpy.readers.get_key` for more details. + dataset_key (str, float, DataID, DataQuery): Dataset identifier to locate + and find any additional + dependencies for. + query (DataQuery): Additional filter parameters. See + `satpy.readers.get_key` for more details. """ # Special case: No required dependencies for this composite if dataset_key is None: return self.empty_node, set() - + if query is None: + dsq = dataset_key + else: + dsq = create_filtered_query(dataset_key, query) # 0 check if the *exact* dataset is already loaded try: - dsid = create_filtered_dsid(dataset_key, **dfilter) - node = self.getitem(dsid) + node = self.getitem(dsq) LOG.trace("Found exact dataset already loaded: {}".format(node.name)) return node, set() except KeyError: @@ -473,7 +472,7 @@ def _find_dependencies(self, dataset_key, **dfilter): # 1 try to get *best* dataset from reader try: - node = self._find_reader_dataset(dataset_key, **dfilter) + node = self._find_reader_dataset(dsq) except TooManyResults: LOG.warning("Too many possible datasets to load for {}".format(dataset_key)) return None, set([dataset_key]) @@ -485,9 +484,9 @@ def _find_dependencies(self, dataset_key, **dfilter): # 2 try to find a composite by name (any version of it is good enough) try: # assume that there is no such thing as a "better" composite - # version so if we find any DatasetIDs already loaded then + # version so if we find any DataIDs already loaded then # we want to use them - node = self.get_filtered_item(dataset_key, **dfilter) + node = self[dsq] LOG.trace("Composite already loaded:\n\tRequested: {}\n\tFound: {}".format(dataset_key, node.name)) return node, set() except KeyError: @@ -496,7 +495,7 @@ def _find_dependencies(self, dataset_key, **dfilter): # 3 try to find a composite that matches try: - node, unknowns = self._find_compositor(dataset_key, **dfilter) + node, unknowns = self._find_compositor(dsq) LOG.trace("Found composite:\n\tRequested: {}\n\tFound: {}".format(dataset_key, node and node.name)) except KeyError: node = None @@ -505,12 +504,12 @@ def _find_dependencies(self, dataset_key, **dfilter): return node, unknowns - def find_dependencies(self, dataset_keys, **dfilter): + def find_dependencies(self, dataset_keys, query=None): """Create the dependency tree. Args: - dataset_keys (iterable): Strings or DatasetIDs to find dependencies for - **dfilter (dict): Additional filter parameters. See + dataset_keys (iterable): Strings, DataIDs, DataQuerys to find dependencies for + query (DataQuery): Additional filter parameters. See `satpy.readers.get_key` for more details. 
Returns: @@ -519,11 +518,11 @@ def find_dependencies(self, dataset_keys, **dfilter): """ unknown_datasets = set() for key in dataset_keys.copy(): - n, unknowns = self._find_dependencies(key, **dfilter) + n, unknowns = self._find_dependencies(key, query) - dataset_keys.discard(key) # remove old non-DatasetID + dataset_keys.discard(key) # remove old non-DataID if n is not None: - dataset_keys.add(n.name) # add equivalent DatasetID + dataset_keys.add(n.name) # add equivalent DataID if unknowns: unknown_datasets.update(unknowns) continue diff --git a/satpy/readers/__init__.py b/satpy/readers/__init__.py index 03794ab28c..82be287f02 100644 --- a/satpy/readers/__init__.py +++ b/satpy/readers/__init__.py @@ -18,10 +18,10 @@ """Shared objects of the various reader classes.""" import logging -import numbers import os import warnings from datetime import datetime, timedelta +import numpy as np import yaml @@ -32,8 +32,7 @@ from satpy.config import (config_search_paths, get_environ_config_dir, glob_config) -from satpy.dataset import DATASET_KEYS, DatasetID -from satpy import CALIBRATION_ORDER +from satpy.dataset import DataID, minimal_default_keys_config, create_filtered_query LOG = logging.getLogger(__name__) @@ -62,113 +61,64 @@ def _wl_dist(wl_a, wl_b): def get_best_dataset_key(key, choices): - """Choose the "best" `DatasetID` from `choices` based on `key`. + """Choose the "best" `DataID` from `choices` based on `key`. - The best key is chosen based on the follow criteria: - - 1. Central wavelength is nearest to the `key` wavelength if - specified. - 2. Least modified dataset if `modifiers` is `None` in `key`. - Otherwise, the modifiers are ignored. - 3. Highest calibration if `calibration` is `None` in `key`. - Calibration priority is chosen by `satpy.CALIBRATION_ORDER`. - 4. Best resolution (smallest number) if `resolution` is `None` - in `key`. Otherwise, the resolution is ignored. + To see how the keys are sorted, refer to `:meth:satpy.datasets.DataQuery.sort_dataids`. This function assumes `choices` has already been filtered to only include datasets that match the provided `key`. Args: - key (DatasetID): Query parameters to sort `choices` by. - choices (iterable): `DatasetID` objects to sort through to determine + key (DataQuery): Query parameters to sort `choices` by. + choices (iterable): `DataID` objects to sort through to determine the best dataset. - Returns: List of best `DatasetID`s from `choices`. If there is more + Returns: List of best `DataID`s from `choices`. If there is more than one element this function could not choose between the available datasets. 
""" - # Choose the wavelength closest to the choice - if key.wavelength is not None and choices: - # find the dataset with a central wavelength nearest to the - # requested wavelength - nearest_wl = min([_wl_dist(key.wavelength, x.wavelength) - for x in choices if x.wavelength is not None]) - choices = [c for c in choices - if _wl_dist(key.wavelength, c.wavelength) == nearest_wl] - if key.modifiers is None and choices: - num_modifiers = min(len(x.modifiers or tuple()) for x in choices) - choices = [c for c in choices if len( - c.modifiers or tuple()) == num_modifiers] - if key.calibration is None and choices: - best_cal = [x.calibration for x in choices if x.calibration] - if best_cal: - best_cal = min(best_cal, key=lambda x: CALIBRATION_ORDER[x]) - choices = [c for c in choices if c.calibration == best_cal] - if key.resolution is None and choices: - low_res = [x.resolution for x in choices if x.resolution] - if low_res: - low_res = min(low_res) - choices = [c for c in choices if c.resolution == low_res] - if key.level is None and choices: - low_level = [x.level for x in choices if x.level] - if low_level: - low_level = max(low_level) - choices = [c for c in choices if c.level == low_level] - - return choices - - -def filter_keys_by_dataset_id(did, key_container): - """Filer provided key iterable by the provided `DatasetID`. + sorted_choices, distances = key.sort_dataids(choices) + if len(sorted_choices) == 0 or distances[0] is np.inf: + return [] + else: + return [choice for choice, distance in zip(sorted_choices, distances) if distance == distances[0]] + + +def filter_keys_by_dataset_query(dquery, key_container): + """Filer provided key iterable by the provided `DataQuery`. Note: The `modifiers` attribute of `did` should be `None` to allow for **any** modifier in the results. Args: - did (DatasetID): Query parameters to match in the `key_container`. - key_container (iterable): Set, list, tuple, or dict of `DatasetID` + dquery (DataQuery): Query parameters to match in the `key_container`. + key_container (iterable): Set, list, tuple, or dict of `DataID` keys. Returns (list): List of keys matching the provided parameters in no specific order. """ - keys = iter(key_container) - - for key in DATASET_KEYS: - if getattr(did, key) is not None: - if key == "wavelength": - keys = [k for k in keys - if (getattr(k, key) is not None and - DatasetID.wavelength_match(getattr(k, key), - getattr(did, key)))] - else: - keys = [k for k in keys - if getattr(k, key) is not None and getattr(k, key) - == getattr(did, key)] - - return keys + return dquery.filter_dataids(key_container) -def get_key(key, key_container, num_results=1, best=True, - resolution=None, calibration=None, polarization=None, - level=None, modifiers=None): +def get_key(key, key_container, num_results=1, best=True, query=None, + **kwargs): """Get the fully-specified key best matching the provided key. Only the best match is returned if `best` is `True` (default). See `get_best_dataset_key` for more information on how this is determined. - The `resolution` and other identifier keywords are provided as a - convenience to filter by multiple parameters at once without having - to filter by multiple `key` inputs. + `query` is provided as a convenience to filter by multiple parameters + at once without having to filter by multiple `key` inputs. Args: - key (DatasetID): DatasetID of query parameters to use for + key (DataID): DataID of query parameters to use for searching. 
Any parameter that is `None` is considered a wild card and any match is accepted. - key_container (dict or set): Container of DatasetID objects that + key_container (dict or set): Container of DataID objects that uses hashing to quickly access items. num_results (int): Number of results to return. Use `0` for all matching results. If `1` then the single matching @@ -176,68 +126,36 @@ def get_key(key, key_container, num_results=1, best=True, (default: 1) best (bool): Sort results to get "best" result first (default: True). See `get_best_dataset_key` for details. - resolution (float, int, or list): Resolution of the dataset in - dataset units (typically - meters). This can also be a - list of these numbers. - calibration (str or list): Dataset calibration - (ex.'reflectance'). This can also be a - list of these strings. - polarization (str or list): Dataset polarization - (ex.'V'). This can also be a + query (DataQuery): filter for the key which can contain for example: + resolution (float, int, or list): Resolution of the dataset in + dataset units (typically + meters). This can also be a + list of these numbers. + calibration (str or list): Dataset calibration + (ex.'reflectance'). This can also be a list of these strings. - level (number or list): Dataset level (ex. 100). This can also be a - list of these numbers. - modifiers (list): Modifiers applied to the dataset. Unlike - resolution and calibration this is the exact - desired list of modifiers for one dataset, not - a list of possible modifiers. + polarization (str or list): Dataset polarization + (ex.'V'). This can also be a + list of these strings. + level (number or list): Dataset level (ex. 100). This can also be a + list of these numbers. + modifiers (list): Modifiers applied to the dataset. Unlike + resolution and calibration this is the exact + desired list of modifiers for one dataset, not + a list of possible modifiers. - Returns (list or DatasetID): Matching key(s) + Returns (list or DataID): Matching key(s) Raises: KeyError if no matching results or if more than one result is found when `num_results` is `1`. """ - if isinstance(key, numbers.Number): - # we want this ID to act as a query so we set modifiers to None - # meaning "we don't care how many modifiers it has". 
- key = DatasetID(wavelength=key, modifiers=None) - elif isinstance(key, str): - # ID should act as a query (see wl comment above) - key = DatasetID(name=key, modifiers=None) - elif not isinstance(key, DatasetID): - raise ValueError("Expected 'DatasetID', str, or number dict key, " - "not {}".format(str(type(key)))) - - res = filter_keys_by_dataset_id(key, key_container) - - # further filter by other parameters - if resolution is not None: - if not isinstance(resolution, (list, tuple)): - resolution = (resolution, ) - res = [k for k in res - if k.resolution is not None and k.resolution in resolution] - if polarization is not None: - if not isinstance(polarization, (list, tuple)): - polarization = (polarization, ) - res = [k for k in res - if k.polarization is not None and k.polarization in - polarization] - if calibration is not None: - if not isinstance(calibration, (list, tuple)): - calibration = (calibration, ) - res = [k for k in res - if k.calibration is not None and k.calibration in calibration] - if level is not None: - if not isinstance(level, (list, tuple)): - level = (level, ) - res = [k for k in res - if k.level is not None and k.level in level] - if modifiers is not None: - res = [k for k in res - if k.modifiers is not None and k.modifiers == modifiers] + key = create_filtered_query(key, query) + + res = filter_keys_by_dataset_query(key, key_container) + if not res: + raise KeyError("No dataset matching '{}' found".format(str(key))) if best: res = get_best_dataset_key(key, res) @@ -255,9 +173,9 @@ def get_key(key, key_container, num_results=1, best=True, class DatasetDict(dict): - """Special dictionary object that can handle dict operations based on dataset name, wavelength, or DatasetID. + """Special dictionary object that can handle dict operations based on dataset name, wavelength, or DataID. - Note: Internal dictionary keys are `DatasetID` objects. + Note: Internal dictionary keys are `DataID` objects. """ @@ -266,9 +184,9 @@ def keys(self, names=False, wavelengths=False): # sort keys so things are a little more deterministic (.keys() is not) keys = sorted(super(DatasetDict, self).keys()) if names: - return (k.name for k in keys) + return (k.get('name') for k in keys) elif wavelengths: - return (k.wavelength for k in keys) + return (k.get('wavelength') for k in keys) else: return keys @@ -276,12 +194,12 @@ def get_key(self, match_key, num_results=1, best=True, **dfilter): """Get multiple fully-specified keys that match the provided query. Args: - key (DatasetID): DatasetID of query parameters to use for - searching. Any parameter that is `None` - is considered a wild card and any match is - accepted. Can also be a string representing the - dataset name or a number representing the dataset - wavelength. + key (DataID): DataID of query parameters to use for + searching. Any parameter that is `None` + is considered a wild card and any match is + accepted. Can also be a string representing the + dataset name or a number representing the dataset + wavelength. num_results (int): Number of results to return. If `0` return all, if `1` return only that element, otherwise return a list of matching keys. 
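The query-based lookup described in the docstrings above can be exercised directly. A minimal sketch, assuming `dataset_ids` is a container of `DataID` objects (for example the keys of a `DatasetDict`)::

    from satpy.dataset import DataQuery
    from satpy.readers import get_key

    # Ask for the single "best" match for 'S5', restricted to radiances.
    best_id = get_key('S5', dataset_ids,
                      query=DataQuery(calibration='radiance'),
                      num_results=1)
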
@@ -292,7 +210,7 @@ def get_key(self, match_key, num_results=1, best=True, **dfilter): best=best, **dfilter) def getitem(self, item): - """Get Node when we know the *exact* DatasetID.""" + """Get Node when we know the *exact* DataID.""" return super(DatasetDict, self).__getitem__(item) def __getitem__(self, item): @@ -314,14 +232,14 @@ def get(self, key, default=None): def __setitem__(self, key, value): """Support assigning 'Dataset' objects or dictionaries of metadata.""" - d = value + value_dict = value if hasattr(value, 'attrs'): # xarray.DataArray objects - d = value.attrs - # use value information to make a more complete DatasetID - if not isinstance(key, DatasetID): - if not isinstance(d, dict): - raise ValueError("Key must be a DatasetID when value is not an xarray DataArray or dict") + value_dict = value.attrs + # use value information to make a more complete DataID + if not isinstance(key, DataID): + if not isinstance(value_dict, dict): + raise ValueError("Key must be a DataID when value is not an xarray DataArray or dict") old_key = key try: key = self.get_key(key) @@ -329,37 +247,39 @@ def __setitem__(self, key, value): if isinstance(old_key, str): new_name = old_key else: - new_name = d.get("name") - # this is a new key and it's not a full DatasetID tuple - key = DatasetID(name=new_name, - resolution=d.get("resolution"), - wavelength=d.get("wavelength"), - polarization=d.get("polarization"), - calibration=d.get("calibration"), - level=d.get("level"), - modifiers=d.get("modifiers", tuple())) - if key.name is None and key.wavelength is None: + new_name = value_dict.get("name") + # this is a new key and it's not a full DataID tuple + if new_name is None and value_dict.get('wavelength') is None: raise ValueError("One of 'name' or 'wavelength' attrs " "values should be set.") + try: + id_keys = value_dict['_satpy_id'].id_keys + except KeyError: + try: + id_keys = value_dict['_satpy_id_keys'] + except KeyError: + id_keys = minimal_default_keys_config + value_dict['name'] = new_name + key = DataID(id_keys, **value_dict) + if hasattr(value, 'attrs') and 'name' not in value.attrs: + value.attrs['name'] = new_name # update the 'value' with the information contained in the key - if isinstance(d, dict): - d["name"] = key.name - # XXX: What should users be allowed to modify? - d["resolution"] = key.resolution - d["calibration"] = key.calibration - d["polarization"] = key.polarization - d["level"] = key.level - d["modifiers"] = key.modifiers - # you can't change the wavelength of a dataset, that doesn't make - # sense - if "wavelength" in d and d["wavelength"] != key.wavelength: - raise TypeError("Can't change the wavelength of a dataset") + try: + new_info = key.to_dict() + except AttributeError: + new_info = key + if isinstance(value_dict, dict): + value_dict.update(new_info) + + if hasattr(value, 'attrs'): + if isinstance(key, DataID): + value.attrs['_satpy_id'] = key return super(DatasetDict, self).__setitem__(key, value) def contains(self, item): - """Check contains when we know the *exact* DatasetID.""" + """Check contains when we know the *exact* DataID.""" return super(DatasetDict, self).__contains__(item) def __contains__(self, item): @@ -679,6 +599,7 @@ def find_files_and_readers(start_time=None, end_time=None, base_dir=None, filter_parameters=None, reader_kwargs=None, missing_ok=False, fs=None): """Find files matching the provided parameters. + Use `start_time` and/or `end_time` to limit found filenames by the times in the filenames (not the internal file metadata). 
Files are matched if they fall anywhere within the range specified by these parameters. diff --git a/satpy/readers/aapp_l1b.py b/satpy/readers/aapp_l1b.py index 8e370e76b1..d59c2d41a9 100644 --- a/satpy/readers/aapp_l1b.py +++ b/satpy/readers/aapp_l1b.py @@ -108,24 +108,24 @@ def end_time(self): def get_dataset(self, key, info): """Get a dataset from the file.""" - if key.name in CHANNEL_NAMES: + if key['name'] in CHANNEL_NAMES: dataset = self.calibrate(key) - elif key.name in ['longitude', 'latitude']: + elif key['name'] in ['longitude', 'latitude']: if self.lons is None or self.lats is None: self.navigate() - if key.name == 'longitude': + if key['name'] == 'longitude': dataset = create_xarray(self.lons) else: dataset = create_xarray(self.lats) dataset.attrs = info else: # Get sun-sat angles - if key.name in ANGLES: - if isinstance(getattr(self, ANGLES[key.name]), np.ndarray): - dataset = create_xarray(getattr(self, ANGLES[key.name])) + if key['name'] in ANGLES: + if isinstance(getattr(self, ANGLES[key['name']]), np.ndarray): + dataset = create_xarray(getattr(self, ANGLES[key['name']])) else: - dataset = self.get_angles(key.name) + dataset = self.get_angles(key['name']) else: - raise ValueError("Not a supported sun-sensor viewing angle: %s", key.name) + raise ValueError("Not a supported sun-sensor viewing angle: %s", key['name']) dataset.attrs.update({'platform_name': self.platform_name, 'sensor': self.sensor}) @@ -223,48 +223,48 @@ def calibrate(self, 'counts': '', 'radiance': 'W*m-2*sr-1*cm ?'} - if dataset_id.name in ("3a", "3b") and self._is3b is None: + if dataset_id['name'] in ("3a", "3b") and self._is3b is None: # Is it 3a or 3b: self._is3a = da.bitwise_and(da.from_array(self._data['scnlinbit'], chunks=LINE_CHUNK), 3) == 0 self._is3b = da.bitwise_and(da.from_array(self._data['scnlinbit'], chunks=LINE_CHUNK), 3) == 1 - if dataset_id.name == '3a' and not np.any(self._is3a): + if dataset_id['name'] == '3a' and not np.any(self._is3a): raise ValueError("Empty dataset for channel 3A") - if dataset_id.name == '3b' and not np.any(self._is3b): + if dataset_id['name'] == '3b' and not np.any(self._is3b): raise ValueError("Empty dataset for channel 3B") try: - vis_idx = ['1', '2', '3a'].index(dataset_id.name) + vis_idx = ['1', '2', '3a'].index(dataset_id['name']) ir_idx = None except ValueError: vis_idx = None - ir_idx = ['3b', '4', '5'].index(dataset_id.name) + ir_idx = ['3b', '4', '5'].index(dataset_id['name']) mask = True if vis_idx is not None: - coeffs = calib_coeffs.get('ch' + dataset_id.name) - if dataset_id.name == '3a': + coeffs = calib_coeffs.get('ch' + dataset_id['name']) + if dataset_id['name'] == '3a': mask = self._is3a[:, None] ds = create_xarray( _vis_calibrate(self._data, vis_idx, - dataset_id.calibration, + dataset_id['calibration'], pre_launch_coeffs, coeffs, mask=mask)) else: - if dataset_id.name == '3b': + if dataset_id['name'] == '3b': mask = self._is3b[:, None] ds = create_xarray( _ir_calibrate(self._header, self._data, ir_idx, - dataset_id.calibration, + dataset_id['calibration'], mask=mask)) - ds.attrs['units'] = units[dataset_id.calibration] + ds.attrs['units'] = units[dataset_id['calibration']] ds.attrs.update(dataset_id._asdict()) return ds diff --git a/satpy/readers/abi_l1b.py b/satpy/readers/abi_l1b.py index 0460c0e2cc..67a73b6d63 100644 --- a/satpy/readers/abi_l1b.py +++ b/satpy/readers/abi_l1b.py @@ -37,17 +37,17 @@ class NC_ABI_L1B(NC_ABI_BASE): def get_dataset(self, key, info): """Load a dataset.""" - logger.debug('Reading in get_dataset %s.', key.name) + 
logger.debug('Reading in get_dataset %s.', key['name']) radiances = self['Rad'] - if key.calibration == 'reflectance': + if key['calibration'] == 'reflectance': logger.debug("Calibrating to reflectances") res = self._vis_calibrate(radiances) - elif key.calibration == 'brightness_temperature': + elif key['calibration'] == 'brightness_temperature': logger.debug("Calibrating to brightness temperatures") res = self._ir_calibrate(radiances) - elif key.calibration != 'radiance': - raise ValueError("Unknown calibration '{}'".format(key.calibration)) + elif key['calibration'] != 'radiance': + raise ValueError("Unknown calibration '{}'".format(key['calibration'])) else: res = radiances diff --git a/satpy/readers/acspo.py b/satpy/readers/acspo.py index 306f8e8f8d..616c5df046 100644 --- a/satpy/readers/acspo.py +++ b/satpy/readers/acspo.py @@ -62,14 +62,14 @@ def get_shape(self, ds_id, ds_info): """Get numpy array shape for the specified dataset. Args: - ds_id (DatasetID): ID of dataset that will be loaded + ds_id (DataID): ID of dataset that will be loaded ds_info (dict): Dictionary of dataset information from config file Returns: tuple: (rows, cols) """ - var_path = ds_info.get('file_key', '{}'.format(ds_id.name)) + var_path = ds_info.get('file_key', '{}'.format(ds_id['name'])) if var_path + '/shape' not in self: # loading a scalar value shape = 1 @@ -98,7 +98,7 @@ def end_time(self): def get_metadata(self, dataset_id, ds_info): """Collect various metadata about the specified dataset.""" - var_path = ds_info.get('file_key', '{}'.format(dataset_id.name)) + var_path = ds_info.get('file_key', '{}'.format(dataset_id['name'])) shape = self.get_shape(dataset_id, ds_info) units = self[var_path + '/attr/units'] info = getattr(self[var_path], 'attrs', {}) @@ -121,7 +121,7 @@ def get_metadata(self, dataset_id, ds_info): def get_dataset(self, dataset_id, ds_info): """Load data array and metadata from file on disk.""" - var_path = ds_info.get('file_key', '{}'.format(dataset_id.name)) + var_path = ds_info.get('file_key', '{}'.format(dataset_id['name'])) metadata = self.get_metadata(dataset_id, ds_info) shape = metadata['shape'] file_shape = self[var_path + '/shape'] diff --git a/satpy/readers/agri_l1.py b/satpy/readers/agri_l1.py index cea1a560ed..7c2d92443f 100644 --- a/satpy/readers/agri_l1.py +++ b/satpy/readers/agri_l1.py @@ -15,7 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""Advanced Geostationary Radiation Imager reader for the Level_1 HDF format +"""Advanced Geostationary Radiation Imager reader for the Level_1 HDF format. 
The files read by this reader are described in the official Real Time Data Service: @@ -46,15 +46,17 @@ class HDF_AGRI_L1(HDF5FileHandler): + """AGRI l1 file handler.""" def __init__(self, filename, filename_info, filetype_info): + """Init filehandler.""" super(HDF_AGRI_L1, self).__init__(filename, filename_info, filetype_info) def get_dataset(self, dataset_id, ds_info): """Load a dataset.""" - logger.debug('Reading in get_dataset %s.', dataset_id.name) - file_key = ds_info.get('file_key', dataset_id.name) - lut_key = ds_info.get('lut_key', dataset_id.name) + logger.debug('Reading in get_dataset %s.', dataset_id['name']) + file_key = ds_info.get('file_key', dataset_id['name']) + lut_key = ds_info.get('lut_key', dataset_id['name']) data = self.get(file_key) lut = self.get(lut_key) if data.ndim >= 2: @@ -121,9 +123,10 @@ def get_dataset(self, dataset_id, ds_info): return data def get_area_def(self, key): + """Get the area definition.""" # Coordination Group for Meteorological Satellites LRIT/HRIT Global Specification # https://www.cgms-info.org/documents/cgms-lrit-hrit-global-specification-(v2-8-of-30-oct-2013).pdf - res = key.resolution + res = key['resolution'] pdict = {} pdict['coff'] = _COFF_list[_resolution_list.index(res)] pdict['loff'] = _LOFF_list[_resolution_list.index(res)] @@ -145,13 +148,13 @@ def get_area_def(self, key): pdict['a_desc'] = "AGRI {} area".format(self.filename_info['observation_type']) - if (key.name in b500): + if (key['name'] in b500): pdict['a_name'] = self.filename_info['observation_type']+'_500m' pdict['p_id'] = 'FY-4A, 500m' - elif (key.name in b1000): + elif (key['name'] in b1000): pdict['a_name'] = self.filename_info['observation_type']+'_1000m' pdict['p_id'] = 'FY-4A, 1000m' - elif (key.name in b2000): + elif (key['name'] in b2000): pdict['a_name'] = self.filename_info['observation_type']+'_2000m' pdict['p_id'] = 'FY-4A, 2000m' else: @@ -172,7 +175,7 @@ def get_area_def(self, key): return area def dn2(self, dn, calibration, slope, offset): - """Convert digital number (DN) to reflectance or radiance + """Convert digital number (DN) to reflectance or radiance. Args: dn: Raw detector digital number @@ -196,7 +199,8 @@ def _getitem(block, lut): return lut[block] def calibrate(self, data, lut): - """Calibrate digital number (DN) to brightness_temperature + """Calibrate digital number (DN) to brightness_temperature. + Args: dn: Raw detector digital number lut: the look up table @@ -214,10 +218,12 @@ def calibrate(self, data, lut): @property def start_time(self): + """Get the start time.""" start_time = self['/attr/Observing Beginning Date'] + 'T' + self['/attr/Observing Beginning Time'] + 'Z' return datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S.%fZ') @property def end_time(self): + """Get the end time.""" end_time = self['/attr/Observing Ending Date'] + 'T' + self['/attr/Observing Ending Time'] + 'Z' return datetime.strptime(end_time, '%Y-%m-%dT%H:%M:%S.%fZ') diff --git a/satpy/readers/ahi_hsd.py b/satpy/readers/ahi_hsd.py index 77290f4075..1ac8d3d924 100644 --- a/satpy/readers/ahi_hsd.py +++ b/satpy/readers/ahi_hsd.py @@ -220,7 +220,7 @@ class AHIHSDFileHandler(BaseFileHandler): - """AHI standard format reader + """AHI standard format reader. The AHI sensor produces data for some pixels outside the Earth disk (i,e: atmospheric limb or deep space pixels). 
@@ -308,15 +308,18 @@ def __init__(self, filename, filename_info, filetype_info, self.calib_mode = calib_mode.upper() def __del__(self): + """Delete the object.""" if (self.is_zipped and os.path.exists(self.filename)): os.remove(self.filename) @property def start_time(self): + """Get the start time.""" return datetime(1858, 11, 17) + timedelta(days=float(self.basic_info['observation_start_time'])) @property def end_time(self): + """Get the end time.""" return datetime(1858, 11, 17) + timedelta(days=float(self.basic_info['observation_end_time'])) @property @@ -332,9 +335,11 @@ def scheduled_time(self): second=dt % 60, microsecond=0) def get_dataset(self, key, info): + """Get the dataset.""" return self.read_band(key, info) def get_area_def(self, dsid): + """Get the area definition.""" del dsid pdict = {} @@ -364,13 +369,13 @@ def get_area_def(self, dsid): return area def _check_fpos(self, fp_, fpos, offset, block): - """Check file position matches blocksize""" + """Check file position matches blocksize.""" if (fp_.tell() + offset != fpos): warnings.warn("Actual "+block+" header size does not match expected") return def _read_header(self, fp_): - """Read header""" + """Read header.""" header = {} fpos = 0 @@ -427,7 +432,7 @@ def _read_header(self, fp_): ("shift_amount_for_line_direction", "f4"), ]) corrections = [] - for i in range(ncorrs): + for _i in range(ncorrs): corrections.append(np.fromfile(fp_, dtype=dtype, count=1)) fpos = fpos + int(header['block8']['blocklength']) self._check_fpos(fp_, fpos, 40, 'block8') @@ -443,7 +448,7 @@ def _read_header(self, fp_): ("observation_time", "f8"), ]) lines_and_times = [] - for i in range(numobstimes): + for _i in range(numobstimes): lines_and_times.append(np.fromfile(fp_, dtype=dtype, count=1)) @@ -462,7 +467,7 @@ def _read_header(self, fp_): num_err_info_data = header["block10"][ 'number_of_error_info_data'][0] err_info_data = [] - for i in range(num_err_info_data): + for _i in range(num_err_info_data): err_info_data.append(np.fromfile(fp_, dtype=dtype, count=1)) header['error_information_data'] = err_info_data fpos = fpos + int(header['block10']['blocklength']) @@ -477,7 +482,7 @@ def _read_header(self, fp_): return header def _read_data(self, fp_, header): - """Read data block""" + """Read data block.""" nlines = int(header["block2"]['number_of_lines'][0]) ncols = int(header["block2"]['number_of_columns'][0]) return da.from_array(np.memmap(self.filename, offset=fp_.tell(), @@ -485,13 +490,13 @@ def _read_data(self, fp_, header): chunks=CHUNK_SIZE) def _mask_invalid(self, data, header): - """Mask invalid data""" + """Mask invalid data.""" invalid = da.logical_or(data == header['block5']["count_value_outside_scan_pixels"][0], data == header['block5']["count_value_error_pixels"][0]) return da.where(invalid, np.float32(np.nan), data) def _mask_space(self, data): - """Mask space pixels""" + """Mask space pixels.""" return data.where(get_geostationary_mask(self.area)) def read_band(self, key, info): @@ -506,7 +511,7 @@ def read_band(self, key, info): logger.debug("Reading time " + str(datetime.now() - tic)) # Calibrate - res = self.calibrate(res, key.calibration) + res = self.calibrate(res, key['calibration']) # Get actual satellite position. For altitude use the ellipsoid radius at the SSP. 
actual_lon = float(self.nav_info['SSP_longitude']) @@ -523,7 +528,7 @@ def read_band(self, key, info): wavelength=info['wavelength'], resolution='resolution', id=key, - name=key.name, + name=key['name'], scheduled_time=self.scheduled_time, platform_name=self.platform_name, sensor=self.sensor, @@ -551,7 +556,7 @@ def read_band(self, key, info): return res def calibrate(self, data, calibration): - """Calibrate the data""" + """Calibrate the data.""" tic = datetime.now() if calibration == 'counts': @@ -569,7 +574,6 @@ def calibrate(self, data, calibration): def convert_to_radiance(self, data): """Calibrate to radiance.""" - bnum = self._header["block5"]['band_number'][0] # Check calibration mode and select corresponding coefficients if self.calib_mode == "UPDATE" and bnum < 7: diff --git a/satpy/readers/ami_l1b.py b/satpy/readers/ami_l1b.py index 85e6c3f27c..81573c03ef 100644 --- a/satpy/readers/ami_l1b.py +++ b/satpy/readers/ami_l1b.py @@ -127,7 +127,7 @@ def get_orbital_parameters(self): def get_dataset(self, dataset_id, ds_info): """Load a dataset as a xarray DataArray.""" - file_key = ds_info.get('file_key', dataset_id.name) + file_key = ds_info.get('file_key', dataset_id['name']) data = self.nc[file_key] # hold on to attributes for later attrs = data.attrs @@ -151,18 +151,18 @@ def get_dataset(self, dataset_id, ds_info): gain = self.nc.attrs['DN_to_Radiance_Gain'] offset = self.nc.attrs['DN_to_Radiance_Offset'] - if dataset_id.calibration in ('radiance', 'reflectance', 'brightness_temperature'): + if dataset_id['calibration'] in ('radiance', 'reflectance', 'brightness_temperature'): data = gain * data + offset - if dataset_id.calibration == 'reflectance': + if dataset_id['calibration'] == 'reflectance': # depends on the radiance calibration above rad_to_alb = self.nc.attrs['Radiance_to_Albedo_c'] if ds_info.get('units') == '%': rad_to_alb *= 100 data = data * rad_to_alb - elif dataset_id.calibration == 'brightness_temperature': + elif dataset_id['calibration'] == 'brightness_temperature': data = self._calibrate_ir(dataset_id, data) - elif dataset_id.calibration not in ('counts', 'radiance'): - raise ValueError("Unknown calibration: '{}'".format(dataset_id.calibration)) + elif dataset_id['calibration'] not in ('counts', 'radiance'): + raise ValueError("Unknown calibration: '{}'".format(dataset_id['calibration'])) for attr_name in ('standard_name', 'units'): attrs[attr_name] = ds_info[attr_name] @@ -178,7 +178,7 @@ def _calibrate_ir(self, dataset_id, data): if self.calib_mode == 'PYSPECTRAL': # depends on the radiance calibration above # Convert um to m^-1 (SI units for pyspectral) - wn = 1 / (dataset_id.wavelength[1] / 1e6) + wn = 1 / (dataset_id['wavelength'][1] / 1e6) # Convert cm^-1 (wavenumbers) and (mW/m^2)/(str/cm^-1) (radiance data) # to SI units m^-1, mW*m^-3*str^-1. bt_data = rad2temp(wn, data.data * 1e-5) @@ -201,7 +201,7 @@ def _calibrate_ir(self, dataset_id, data): hval = self.nc.attrs['Plank_constant_h'] # Compute wavenumber as cm-1 - wn = (10000 / dataset_id.wavelength[1]) * 100 + wn = (10000 / dataset_id['wavelength'][1]) * 100 # Convert radiance to effective brightness temperature e1 = (2 * hval * cval * cval) * np.power(wn, 3) diff --git a/satpy/readers/amsr2_l1b.py b/satpy/readers/amsr2_l1b.py index 718a728e47..1f40014d9b 100644 --- a/satpy/readers/amsr2_l1b.py +++ b/satpy/readers/amsr2_l1b.py @@ -15,14 +15,16 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""Reader for AMSR2 L1B files in HDF5 format. 
-""" +"""Reader for AMSR2 L1B files in HDF5 format.""" from satpy.readers.hdf5_utils import HDF5FileHandler class AMSR2L1BFileHandler(HDF5FileHandler): + """File handler for AMSR2 l1b.""" + def get_metadata(self, ds_id, ds_info): + """Get the metadata.""" var_path = ds_info['file_key'] info = getattr(self[var_path], 'attrs', {}) info.update(ds_info) @@ -42,7 +44,7 @@ def get_shape(self, ds_id, ds_info): var_path = ds_info['file_key'] shape = self[var_path + '/shape'] if ((ds_info.get('standard_name') == "longitude" or ds_info.get('standard_name') == "latitude") and - ds_id.resolution == 10000): + ds_id['resolution'] == 10000): return shape[0], int(shape[1] / 2) return shape @@ -55,7 +57,7 @@ def get_dataset(self, ds_id, ds_info): data = self[var_path] if ((ds_info.get('standard_name') == "longitude" or ds_info.get('standard_name') == "latitude") and - ds_id.resolution == 10000): + ds_id['resolution'] == 10000): # FIXME: Lower frequency channels need CoRegistration parameters applied data = data[:, ::2] * self[var_path + "/attr/SCALE FACTOR"] else: diff --git a/satpy/readers/amsr2_l2.py b/satpy/readers/amsr2_l2.py index ea1b27c578..f241861c22 100644 --- a/satpy/readers/amsr2_l2.py +++ b/satpy/readers/amsr2_l2.py @@ -21,13 +21,15 @@ class AMSR2L2FileHandler(AMSR2L1BFileHandler): + """AMSR2 level 2 file handler.""" + def mask_dataset(self, ds_info, data): - """Mask data with the fill value""" + """Mask data with the fill value.""" fill_value = ds_info.get('fill_value', 65535) return data.where(data != fill_value) def scale_dataset(self, var_path, data): - """scale data with the scale factor attribute""" + """Scale data with the scale factor attribute.""" return data * self[var_path + "/attr/SCALE FACTOR"] def get_dataset(self, ds_id, ds_info): diff --git a/satpy/readers/avhrr_l1b_gaclac.py b/satpy/readers/avhrr_l1b_gaclac.py index 46db6331ce..b8c29240a6 100644 --- a/satpy/readers/avhrr_l1b_gaclac.py +++ b/satpy/readers/avhrr_l1b_gaclac.py @@ -130,9 +130,9 @@ def read_raw_data(self): def get_dataset(self, key, info): """Get the dataset.""" self.read_raw_data() - if key.name in ['latitude', 'longitude']: + if key['name'] in ['latitude', 'longitude']: # Lats/lons are buffered by the reader - if key.name == 'latitude': + if key['name'] == 'latitude': _, data = self.reader.get_lonlat() else: data, _ = self.reader.get_lonlat() @@ -141,11 +141,11 @@ def get_dataset(self, key, info): # pixel has a lat/lon coordinate xdim = 'x' if self.interpolate_coords else 'x_every_eighth' xcoords = None - elif key.name in ANGLES: + elif key['name'] in ANGLES: data = self._get_angle(key) xdim = 'x' if self.interpolate_coords else 'x_every_eighth' xcoords = None - elif key.name == 'qual_flags': + elif key['name'] == 'qual_flags': data = self.reader.get_qual_flags() xdim = 'num_flags' xcoords = ['Scan line number', @@ -155,13 +155,13 @@ def get_dataset(self, key, info): 'Solar contamination of blackbody in channels 3', 'Solar contamination of blackbody in channels 4', 'Solar contamination of blackbody in channels 5'] - elif key.name.upper() in self.chn_dict: + elif key['name'].upper() in self.chn_dict: # Read and calibrate channel data data = self._get_channel(key) xdim = 'x' xcoords = None else: - raise ValueError('Unknown dataset: {}'.format(key.name)) + raise ValueError('Unknown dataset: {}'.format(key['name'])) # Update start/end time using the actual scanline timestamps times = self.reader.get_times() @@ -247,8 +247,8 @@ def _slice(self, data): def _get_channel(self, key): """Get channel and buffer results.""" - 
name = key.name - calibration = key.calibration + name = key['name'] + calibration = key['calibration'] if calibration == 'counts': if self.counts is None: counts = self.reader.get_counts() @@ -277,7 +277,7 @@ def _get_angle(self, key): 'solar_zenith_angle': sun_zenith, 'solar_azimuth_angle': sun_azi, 'sun_sensor_azimuth_difference_angle': rel_azi} - return self.angles[key.name] + return self.angles[key['name']] def _strip_invalid_lat(self): """Strip scanlines with invalid coordinates in the beginning/end of the orbit. diff --git a/satpy/readers/caliop_l2_cloud.py b/satpy/readers/caliop_l2_cloud.py index 52fe945e62..b8e9baae72 100644 --- a/satpy/readers/caliop_l2_cloud.py +++ b/satpy/readers/caliop_l2_cloud.py @@ -75,19 +75,19 @@ def get_filehandle(self): def get_dataset(self, key, info): """Read data from file and return the corresponding projectables.""" - if key.name in ['longitude', 'latitude']: + if key['name'] in ['longitude', 'latitude']: logger.debug('Reading coordinate arrays.') if self.lons is None or self.lats is None: self.lons, self.lats = self.get_lonlats() - if key.name == 'latitude': + if key['name'] == 'latitude': proj = Dataset(self.lats, id=key, **info) else: proj = Dataset(self.lons, id=key, **info) else: - data = self.get_sds_variable(key.name) + data = self.get_sds_variable(key['name']) proj = Dataset(data, id=key, **info) return proj diff --git a/satpy/readers/clavrx.py b/satpy/readers/clavrx.py index dd7adb1329..78d4e3e05c 100644 --- a/satpy/readers/clavrx.py +++ b/satpy/readers/clavrx.py @@ -15,8 +15,8 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""Interface to CLAVR-X HDF4 products. -""" +"""Interface to CLAVR-X HDF4 products.""" + import os import logging import numpy as np @@ -34,6 +34,8 @@ class CLAVRXFileHandler(HDF4FileHandler): + """A file handler for CLAVRx files.""" + sensors = { 'MODIS': 'modis', 'VIIRS': 'viirs', @@ -63,23 +65,27 @@ class CLAVRXFileHandler(HDF4FileHandler): } def get_sensor(self, sensor): + """Get the sensor.""" for k, v in self.sensors.items(): if k in sensor: return v raise ValueError("Unknown sensor '{}'".format(sensor)) def get_platform(self, platform): + """Get the platform.""" for k, v in self.platforms.items(): if k in platform: return v return platform def get_rows_per_scan(self, sensor): + """Get number of rows per scan.""" for k, v in self.rows_per_scan.items(): if sensor.startswith(k): return v def get_nadir_resolution(self, sensor): + """Get nadir resolution.""" for k, v in self.nadir_resolution.items(): if sensor.startswith(k): return v @@ -91,14 +97,16 @@ def get_nadir_resolution(self, sensor): @property def start_time(self): + """Get the start time.""" return self.filename_info['start_time'] @property def end_time(self): + """Get the end time.""" return self.filename_info.get('end_time', self.start_time) def available_datasets(self, configured_datasets=None): - """Automatically determine datasets provided by this file""" + """Automatically determine datasets provided by this file.""" sensor = self.get_sensor(self['/attr/sensor']) nadir_resolution = self.get_nadir_resolution(sensor) coordinates = ('longitude', 'latitude') @@ -141,10 +149,12 @@ def available_datasets(self, configured_datasets=None): yield True, ds_info def get_shape(self, dataset_id, ds_info): - var_name = ds_info.get('file_key', dataset_id.name) + """Get the shape.""" + var_name = ds_info.get('file_key', dataset_id['name']) return self[var_name + '/shape'] def get_metadata(self, data_arr, 
ds_info): + """Get metadata.""" i = {} i.update(data_arr.attrs) i.update(ds_info) @@ -171,10 +181,11 @@ def get_metadata(self, data_arr, ds_info): return i def get_dataset(self, dataset_id, ds_info): - var_name = ds_info.get('file_key', dataset_id.name) + """Get a dataset.""" + var_name = ds_info.get('file_key', dataset_id['name']) data = self[var_name] - if dataset_id.resolution: - data.attrs['resolution'] = dataset_id.resolution + if dataset_id['resolution']: + data.attrs['resolution'] = dataset_id['resolution'] data.attrs = self.get_metadata(data, ds_info) fill = data.attrs.pop('_FillValue', None) factor = data.attrs.pop('scale_factor', None) @@ -214,8 +225,7 @@ def _area_extent(x, y, h): @staticmethod def _read_pug_fixed_grid(projection, distance_multiplier=1.0): - """Read from recent PUG format, where axes are in meters - """ + """Read from recent PUG format, where axes are in meters.""" a = projection.semi_major_axis h = projection.perspective_point_height b = projection.semi_minor_axis @@ -244,7 +254,9 @@ def _find_input_nc(self, l1b_base): return l1b_filenames[0] def _read_axi_fixed_grid(self, l1b_attr): - """CLAVR-x does not transcribe fixed grid parameters to its output + """Read a fixed grid. + + CLAVR-x does not transcribe fixed grid parameters to its output We have to recover that information from the original input file, which is partially named as L1B attribute diff --git a/satpy/readers/cmsaf_claas2.py b/satpy/readers/cmsaf_claas2.py index 99be0f8d49..70f10e2e39 100644 --- a/satpy/readers/cmsaf_claas2.py +++ b/satpy/readers/cmsaf_claas2.py @@ -7,6 +7,7 @@ class CLAAS2(NetCDF4FileHandler): """Handle CMSAF CLAAS-2 files.""" + def __init__(self, *args, **kwargs): """Initialise class.""" super().__init__(*args, **kwargs, cache_handle=False, @@ -15,7 +16,6 @@ def __init__(self, *args, **kwargs): @property def start_time(self): """Get start time from file.""" - # datetime module can't handle timezone identifier return datetime.datetime.fromisoformat( self["/attr/time_coverage_start"].rstrip("Z")) @@ -32,7 +32,6 @@ def available_datasets(self, configured_datasets=None): Return a generator that will yield the datasets available in the loaded files. See docstring in parent class for specification details. """ - # this method should work for any (CF-conform) NetCDF file, should it # be somewhere more generically available? Perhaps in the # `NetCDF4FileHandler`? 
@@ -64,13 +63,15 @@ def _get_dsinfo(self, var): return ds_info def get_dataset(self, dataset_id, info): - ds = self[dataset_id.name] + """Get the dataset.""" + ds = self[dataset_id['name']] if "time" in ds.dims: return ds.squeeze(["time"]) else: return ds def get_area_def(self, dataset_id): + """Get the area definition.""" return pyresample.geometry.AreaDefinition( "some_area_name", "on-the-fly area", diff --git a/satpy/readers/electrol_hrit.py b/satpy/readers/electrol_hrit.py index 34acb68a89..c2d97de643 100644 --- a/satpy/readers/electrol_hrit.py +++ b/satpy/readers/electrol_hrit.py @@ -273,7 +273,7 @@ def get_dataset(self, key, info): """Get the data from the files.""" res = super(HRITGOMSFileHandler, self).get_dataset(key, info) - res = self.calibrate(res, key.calibration) + res = self.calibrate(res, key['calibration']) res.attrs['units'] = info['units'] res.attrs['standard_name'] = info['standard_name'] res.attrs['wavelength'] = info['wavelength'] diff --git a/satpy/readers/eps_l1b.py b/satpy/readers/eps_l1b.py index 8578e31543..accae3b49c 100644 --- a/satpy/readers/eps_l1b.py +++ b/satpy/readers/eps_l1b.py @@ -291,59 +291,59 @@ def get_dataset(self, key, info): if self.sections is None: self._read_all() - if key.name in ['longitude', 'latitude']: + if key['name'] in ['longitude', 'latitude']: lons, lats = self.get_full_lonlats() - if key.name == 'longitude': + if key['name'] == 'longitude': dataset = create_xarray(lons) else: dataset = create_xarray(lats) - elif key.name in ['solar_zenith_angle', 'solar_azimuth_angle', - 'satellite_zenith_angle', 'satellite_azimuth_angle']: + elif key['name'] in ['solar_zenith_angle', 'solar_azimuth_angle', + 'satellite_zenith_angle', 'satellite_azimuth_angle']: sun_azi, sun_zen, sat_azi, sat_zen = self.get_full_angles() - if key.name == 'solar_zenith_angle': + if key['name'] == 'solar_zenith_angle': dataset = create_xarray(sun_zen) - elif key.name == 'solar_azimuth_angle': + elif key['name'] == 'solar_azimuth_angle': dataset = create_xarray(sun_azi) - if key.name == 'satellite_zenith_angle': + if key['name'] == 'satellite_zenith_angle': dataset = create_xarray(sat_zen) - elif key.name == 'satellite_azimuth_angle': + elif key['name'] == 'satellite_azimuth_angle': dataset = create_xarray(sat_azi) else: mask = None - if key.calibration == 'counts': + if key['calibration'] == 'counts': raise ValueError('calibration=counts is not supported! 
' + 'This reader cannot return counts') - elif key.calibration not in ['reflectance', 'brightness_temperature', 'radiance']: - raise ValueError('calibration type ' + str(key.calibration) + + elif key['calibration'] not in ['reflectance', 'brightness_temperature', 'radiance']: + raise ValueError('calibration type ' + str(key['calibration']) + ' is not supported!') - if key.name in ['3A', '3a'] and self.three_a_mask is None: + if key['name'] in ['3A', '3a'] and self.three_a_mask is None: self.three_a_mask = ((self["FRAME_INDICATOR"] & 2 ** 16) != 2 ** 16) - if key.name in ['3B', '3b'] and self.three_b_mask is None: + if key['name'] in ['3B', '3b'] and self.three_b_mask is None: self.three_b_mask = ((self["FRAME_INDICATOR"] & 2 ** 16) != 0) - if key.name not in ["1", "2", "3a", "3A", "3b", "3B", "4", "5"]: - logger.info("Can't load channel in eps_l1b: " + str(key.name)) + if key['name'] not in ["1", "2", "3a", "3A", "3b", "3B", "4", "5"]: + logger.info("Can't load channel in eps_l1b: " + str(key['name'])) return - if key.name == "1": - if key.calibration == 'reflectance': + if key['name'] == "1": + if key['calibration'] == 'reflectance': array = radiance_to_refl(self["SCENE_RADIANCES"][:, 0, :], self["CH1_SOLAR_FILTERED_IRRADIANCE"]) else: array = self["SCENE_RADIANCES"][:, 0, :] - if key.name == "2": - if key.calibration == 'reflectance': + if key['name'] == "2": + if key['calibration'] == 'reflectance': array = radiance_to_refl(self["SCENE_RADIANCES"][:, 1, :], self["CH2_SOLAR_FILTERED_IRRADIANCE"]) else: array = self["SCENE_RADIANCES"][:, 1, :] - if key.name.lower() == "3a": - if key.calibration == 'reflectance': + if key['name'].lower() == "3a": + if key['calibration'] == 'reflectance': array = radiance_to_refl(self["SCENE_RADIANCES"][:, 2, :], self["CH3A_SOLAR_FILTERED_IRRADIANCE"]) else: @@ -352,8 +352,8 @@ def get_dataset(self, key, info): mask = np.empty(array.shape, dtype=bool) mask[:, :] = self.three_a_mask[:, np.newaxis] - if key.name.lower() == "3b": - if key.calibration == 'brightness_temperature': + if key['name'].lower() == "3b": + if key['calibration'] == 'brightness_temperature': array = radiance_to_bt(self["SCENE_RADIANCES"][:, 2, :], self["CH3B_CENTRAL_WAVENUMBER"], self["CH3B_CONSTANT1"], @@ -363,8 +363,8 @@ def get_dataset(self, key, info): mask = np.empty(array.shape, dtype=bool) mask[:, :] = self.three_b_mask[:, np.newaxis] - if key.name == "4": - if key.calibration == 'brightness_temperature': + if key['name'] == "4": + if key['calibration'] == 'brightness_temperature': array = radiance_to_bt(self["SCENE_RADIANCES"][:, 3, :], self["CH4_CENTRAL_WAVENUMBER"], self["CH4_CONSTANT1"], @@ -372,8 +372,8 @@ def get_dataset(self, key, info): else: array = self["SCENE_RADIANCES"][:, 3, :] - if key.name == "5": - if key.calibration == 'brightness_temperature': + if key['name'] == "5": + if key['calibration'] == 'brightness_temperature': array = radiance_to_bt(self["SCENE_RADIANCES"][:, 4, :], self["CH5_CENTRAL_WAVENUMBER"], self["CH5_CONSTANT1"], diff --git a/satpy/readers/fci_l1c_fdhsi.py b/satpy/readers/fci_l1c_fdhsi.py index 9c64179c31..17896140e0 100644 --- a/satpy/readers/fci_l1c_fdhsi.py +++ b/satpy/readers/fci_l1c_fdhsi.py @@ -66,7 +66,7 @@ change. Currently, for each channel, the pixel quality is available by ``_pixel_quality``. In the future, they will likely all be called ``pixel_quality`` and disambiguated by a to-be-decided property in the - `DatasetID`. + `DataID`. .. 
_RADTOBR: https://www.eumetsat.int/website/wcm/idc/idcplg?IdcService=GET_FILE&dDocName=PDF_EFFECT_RAD_TO_BRIGHTNESS&RevisionSelectionMethod=LatestReleased&Rendition=Web .. _PUG: http://www.eumetsat.int/website/wcm/idc/idcplg?IdcService=GET_FILE&dDocName=PDF_DMT_719113&RevisionSelectionMethod=LatestReleased&Rendition=Web @@ -137,14 +137,14 @@ def end_time(self): def get_dataset(self, key, info=None): """Load a dataset.""" - logger.debug('Reading {} from {}'.format(key.name, self.filename)) - if "pixel_quality" in key.name: + logger.debug('Reading {} from {}'.format(key['name'], self.filename)) + if "pixel_quality" in key['name']: return self._get_dataset_quality(key, info=info) - elif any(lb in key.name for lb in {"vis_", "ir_", "nir_", "wv_"}): + elif any(lb in key['name'] for lb in {"vis_", "ir_", "nir_", "wv_"}): return self._get_dataset_measurand(key, info=info) else: raise ValueError("Unknown dataset key, not a channel or quality: " - f"{key.name:s}") + f"{key['name']:s}") def _get_dataset_measurand(self, key, info=None): """Load dataset corresponding to channel measurement. @@ -155,7 +155,7 @@ def _get_dataset_measurand(self, key, info=None): """ # Get the dataset # Get metadata for given dataset - measured = self.get_channel_measured_group_path(key.name) + measured = self.get_channel_measured_group_path(key['name']) data = self[measured + "/effective_radiance"] attrs = data.attrs.copy() @@ -165,7 +165,7 @@ def _get_dataset_measurand(self, key, info=None): "FillValue", default_fillvals.get(data.dtype.str[1:], np.nan)) vr = attrs.get("valid_range", [-np.inf, np.inf]) - if key.calibration == "counts": + if key['calibration'] == "counts": attrs["_FillValue"] = fv nfv = fv else: @@ -189,7 +189,7 @@ def _get_dataset_measurand(self, key, info=None): # https://github.com/pytroll/satpy/issues/1171. if "pixel_quality" in attrs["ancillary_variables"]: attrs["ancillary_variables"] = attrs["ancillary_variables"].replace( - "pixel_quality", key.name + "_pixel_quality") + "pixel_quality", key['name'] + "_pixel_quality") else: raise ValueError( "Unexpected value for attribute ancillary_variables, " @@ -205,7 +205,7 @@ def _get_dataset_measurand(self, key, info=None): self["/attr/platform"], self["/attr/platform"]) # remove unpacking parameters for calibrated data - if key.calibration in ['brightness_temperature', 'reflectance']: + if key['calibration'] in ['brightness_temperature', 'reflectance']: res.attrs.pop("add_offset") res.attrs.pop("warm_add_offset") res.attrs.pop("scale_factor") @@ -226,11 +226,11 @@ def _get_dataset_quality(self, key, info=None): necessary. 
""" # FIXME: replace by .removesuffix after we drop support for Python < 3.9 - if key.name.endswith("_pixel_quality"): - chan_lab = key.name[:-len("_pixel_quality")] + if key['name'].endswith("_pixel_quality"): + chan_lab = key['name'][:-len("_pixel_quality")] else: raise ValueError("Quality label must end with pixel_quality, got " - f"{key.name:s}") + f"{key['name']:s}") grp_path = self.get_channel_measured_group_path(chan_lab) dv_path = grp_path + "/pixel_quality" data = self[dv_path] @@ -247,10 +247,10 @@ def calc_area_extent(self, key): # if a user requests a pixel quality before the channel data, the # yaml-reader will ask the area extent of the pixel quality field, # which will ultimately end up here - if key.name.endswith("_pixel_quality"): - lab = key.name[:-len("_pixel_quality")] + if key['name'].endswith("_pixel_quality"): + lab = key['name'][:-len("_pixel_quality")] else: - lab = key.name + lab = key['name'] # Get metadata for given dataset measured = self.get_channel_measured_group_path(lab) # Get start/end line and column of loaded swath. @@ -295,8 +295,8 @@ def get_area_def(self, key, info=None): """Calculate on-fly area definition for 0 degree geos-projection for a dataset.""" # assumption: channels with same resolution should have same area # cache results to improve performance - if key.resolution in self._cache.keys(): - return self._cache[key.resolution] + if key['resolution'] in self._cache: + return self._cache[key['resolution']] a = float(self["data/mtg_geos_projection/attr/semi_major_axis"]) b = float(self["data/mtg_geos_projection/attr/semi_minor_axis"]) @@ -327,22 +327,22 @@ def get_area_def(self, key, info=None): nlines, area_extent) - self._cache[key.resolution] = area + self._cache[key['resolution']] = area return area def calibrate(self, data, key): """Calibrate data.""" - if key.calibration == "counts": + if key['calibration'] == "counts": # from package description, this just means not applying add_offset # and scale_factor data.attrs["units"] = "1" - elif key.calibration in ['brightness_temperature', 'reflectance', 'radiance']: + elif key['calibration'] in ['brightness_temperature', 'reflectance', 'radiance']: data = self.calibrate_counts_to_physical_quantity(data, key) else: logger.error( "Received unknown calibration key. 
Expected " "'brightness_temperature', 'reflectance' or 'radiance', got " - + key.calibration + ".") + + key['calibration'] + ".") return data @@ -352,9 +352,9 @@ def calibrate_counts_to_physical_quantity(self, data, key): data = self.calibrate_counts_to_rad(data, key) - if key.calibration == 'brightness_temperature': + if key['calibration'] == 'brightness_temperature': data = self.calibrate_rad_to_bt(data, key) - elif key.calibration == 'reflectance': + elif key['calibration'] == 'reflectance': data = self.calibrate_rad_to_refl(data, key) return data @@ -362,7 +362,7 @@ def calibrate_counts_to_physical_quantity(self, data, key): def calibrate_counts_to_rad(self, data, key): """Calibrate counts to radiances.""" radiance_units = data.attrs["units"] - if key.name == 'ir_38': + if key['name'] == 'ir_38': data = xr.where(((2 ** 12 - 1 < data) & (data <= 2 ** 13 - 1)), (data * data.attrs.get("warm_scale_factor", 1) + data.attrs.get("warm_add_offset", 0)), @@ -379,7 +379,7 @@ def calibrate_counts_to_rad(self, data, key): def calibrate_rad_to_bt(self, radiance, key): """IR channel calibration.""" - measured = self.get_channel_measured_group_path(key.name) + measured = self.get_channel_measured_group_path(key['name']) # using the method from RADTOBR and PUG vc = self[measured + "/radiance_to_bt_conversion_coefficient_wavenumber"] @@ -410,7 +410,7 @@ def calibrate_rad_to_bt(self, radiance, key): def calibrate_rad_to_refl(self, radiance, key): """VIS channel calibration.""" - measured = self.get_channel_measured_group_path(key.name) + measured = self.get_channel_measured_group_path(key['name']) cesi = self[measured + "/channel_effective_solar_irradiance"] diff --git a/satpy/readers/file_handlers.py b/satpy/readers/file_handlers.py index 936a134c6a..086420d626 100644 --- a/satpy/readers/file_handlers.py +++ b/satpy/readers/file_handlers.py @@ -176,7 +176,7 @@ def available_datasets(self, configured_datasets=None): This method should **not** update values of the dataset information dictionary **unless** this file handler has a matching file type (the data could be loaded from this object in the future) and at least - **one** :class:`satpy.dataset.DatasetID` key is also modified. + **one** :class:`satpy.dataset.DataID` key is also modified. Otherwise, this file type may override the information provided by a more preferred file type (as specified in the YAML file). It is recommended that any non-ID metadata be updated during the @@ -185,7 +185,7 @@ def available_datasets(self, configured_datasets=None): other file type's handler. The availability "boolean" not being ``None`` does not mean that a file handler called later can't provide an additional dataset, but - it must provide more identifying (DatasetID) information to do so + it must provide more identifying (DataID) information to do so and should yield its new dataset in addition to the previous one. 
Args: diff --git a/satpy/readers/generic_image.py b/satpy/readers/generic_image.py index 7794d95e30..d261d2aa2e 100644 --- a/satpy/readers/generic_image.py +++ b/satpy/readers/generic_image.py @@ -103,7 +103,7 @@ def end_time(self): def get_dataset(self, key, info): """Get a dataset from the file.""" logger.debug("Reading %s.", key) - return self.file_content[key.name] + return self.file_content[key['name']] def mask_image_data(data): diff --git a/satpy/readers/geocat.py b/satpy/readers/geocat.py index a63711548c..b9094119f9 100644 --- a/satpy/readers/geocat.py +++ b/satpy/readers/geocat.py @@ -191,7 +191,7 @@ def available_datasets(self, configured_datasets=None): def get_shape(self, dataset_id, ds_info): """Get shape.""" - var_name = ds_info.get('file_key', dataset_id.name) + var_name = ds_info.get('file_key', dataset_id['name']) return self[var_name + '/shape'] def _first_good_nav(self, lon_arr, lat_arr): @@ -235,7 +235,7 @@ def get_area_def(self, dsid): raise NotImplementedError("Don't know how to get the Area Definition for this file") platform = self.get_platform(self['/attr/Platform_Name']) - res = self._calc_area_resolution(dsid.resolution) + res = self._calc_area_resolution(dsid['resolution']) proj = self._get_proj(platform, float(self['/attr/Subsatellite_Longitude'])) area_name = '{} {} Area at {}m'.format( platform, @@ -257,7 +257,7 @@ def get_area_def(self, dsid): def get_metadata(self, dataset_id, ds_info): """Get metadata.""" - var_name = ds_info.get('file_key', dataset_id.name) + var_name = ds_info.get('file_key', dataset_id['name']) shape = self.get_shape(dataset_id, ds_info) info = getattr(self[var_name], 'attrs', {}) info['shape'] = shape @@ -269,7 +269,7 @@ def get_metadata(self, dataset_id, ds_info): info['sensor'] = self.get_sensor(self['/attr/Sensor_Name']) info['platform_name'] = self.get_platform(self['/attr/Platform_Name']) - info['resolution'] = dataset_id.resolution + info['resolution'] = dataset_id['resolution'] if var_name == 'pixel_longitude': info['standard_name'] = 'longitude' elif var_name == 'pixel_latitude': @@ -279,7 +279,7 @@ def get_metadata(self, dataset_id, ds_info): def get_dataset(self, dataset_id, ds_info): """Get dataset.""" - var_name = ds_info.get('file_key', dataset_id.name) + var_name = ds_info.get('file_key', dataset_id['name']) # FUTURE: Metadata retrieval may be separate info = self.get_metadata(dataset_id, ds_info) data = self[var_name] diff --git a/satpy/readers/ghrsst_l3c_sst.py b/satpy/readers/ghrsst_l3c_sst.py index 8055cdfd56..9d3cf569c7 100644 --- a/satpy/readers/ghrsst_l3c_sst.py +++ b/satpy/readers/ghrsst_l3c_sst.py @@ -44,7 +44,7 @@ def get_area_def(self, area_id, area_info): def get_dataset(self, dataset_id, ds_info, out=None): """Load a dataset.""" - var_path = ds_info.get('file_key', '{}'.format(dataset_id.name)) + var_path = ds_info.get('file_key', '{}'.format(dataset_id['name'])) dtype = ds_info.get('dtype', np.float32) if var_path + '/shape' not in self: # loading a scalar value diff --git a/satpy/readers/glm_l2.py b/satpy/readers/glm_l2.py index 15c03a7dd0..9ae37bbaf4 100644 --- a/satpy/readers/glm_l2.py +++ b/satpy/readers/glm_l2.py @@ -58,8 +58,8 @@ def end_time(self): def get_dataset(self, key, info): """Load a dataset.""" - logger.debug('Reading in get_dataset %s.', key.name) - res = self[key.name] + logger.debug('Reading in get_dataset %s.', key['name']) + res = self[key['name']] res.attrs.update({'platform_name': self.platform_name, 'sensor': self.sensor}) res.attrs.update(self.filename_info) diff --git 
a/satpy/readers/goes_imager_hrit.py b/satpy/readers/goes_imager_hrit.py index a50cdab125..8ff3f786a5 100644 --- a/satpy/readers/goes_imager_hrit.py +++ b/satpy/readers/goes_imager_hrit.py @@ -379,7 +379,7 @@ def get_dataset(self, key, info): self.mda['calibration_parameters'] = self._get_calibration_params() - res = self.calibrate(res, key.calibration) + res = self.calibrate(res, key['calibration']) new_attrs = info.copy() new_attrs.update(res.attrs) res.attrs = new_attrs diff --git a/satpy/readers/goes_imager_nc.py b/satpy/readers/goes_imager_nc.py index 3e4c70ab71..5a2bf375c6 100644 --- a/satpy/readers/goes_imager_nc.py +++ b/satpy/readers/goes_imager_nc.py @@ -15,8 +15,8 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""Reader for GOES 8-15 imager data in netCDF format from NOAA CLASS - Also handles GOES 15 data in netCDF format reformated by Eumetsat +"""Reader for GOES 8-15 imager data in netCDF format from NOAA CLASS. +Also handles GOES 15 data in netCDF format reformated by Eumetsat GOES Imager netCDF files contain geolocated detector counts. If ordering via NOAA CLASS, select 16 bits/pixel. The instrument oversamples the viewed scene @@ -564,7 +564,8 @@ class GOESNCBaseFileHandler(BaseFileHandler): - """File handler for GOES Imager data in netCDF format""" + """File handler for GOES Imager data in netCDF format.""" + def __init__(self, filename, filename_info, filetype_info, geo_data=None): """Initialize the reader.""" super(GOESNCBaseFileHandler, self).__init__(filename, filename_info, @@ -588,27 +589,29 @@ def __init__(self, filename, filename_info, filetype_info, geo_data=None): @abstractmethod def get_dataset(self, key, info): - """Load dataset designated by the given key from file""" + """Load dataset designated by the given key from file.""" raise NotImplementedError @abstractmethod def calibrate(self, data, calibration, channel): - """Perform calibration""" + """Perform calibration.""" raise NotImplementedError @property @abstractmethod def vis_sectors(self): + """Get the vis sectors.""" raise NotImplementedError @property @abstractmethod def ir_sectors(self): + """Get the ir sectors.""" raise NotImplementedError @staticmethod def _get_platform_name(ncattr): - """Determine name of the platform""" + """Determine name of the platform.""" match = re.match(r'G-(\d+)', ncattr) if match: return SPACECRAFTS.get(int(match.groups()[0])) @@ -616,7 +619,7 @@ def _get_platform_name(ncattr): return None def _get_sector(self, channel, nlines, ncols): - """Determine which sector was scanned""" + """Determine which sector was scanned.""" if self._is_vis(channel): margin = 100 sectors_ref = self.vis_sectors @@ -633,7 +636,7 @@ def _get_sector(self, channel, nlines, ncols): @staticmethod def _is_vis(channel): - """Determine whether the given channel is a visible channel""" + """Determine whether the given channel is a visible channel.""" if isinstance(channel, str): return channel == '00_7' elif isinstance(channel, int): @@ -643,7 +646,7 @@ def _is_vis(channel): @staticmethod def _get_earth_mask(lat): - """Identify earth/space pixels + """Identify earth/space pixels. Returns: Mask (1=earth, 0=space) @@ -653,7 +656,7 @@ def _get_earth_mask(lat): @staticmethod def _get_nadir_pixel(earth_mask, sector): - """Find the nadir pixel + """Find the nadir pixel. 
Args: earth_mask: Mask identifying earth and space pixels @@ -678,7 +681,7 @@ def _get_nadir_pixel(earth_mask, sector): @staticmethod def _is_yaw_flip(lat, delta=10): - """Determine whether the satellite is yaw-flipped ('upside down')""" + """Determine whether the satellite is yaw-flipped ('upside down').""" logger.debug('Computing yaw flip flag') # In case of yaw-flip the data and coordinates in the netCDF files are # also flipped. Just check whether the latitude increases or decrases @@ -687,7 +690,7 @@ def _is_yaw_flip(lat, delta=10): return (lat[crow+delta, ccol] - lat[crow, ccol]).values > 0 def _get_area_def_uniform_sampling(self, lon0, channel): - """Get area definition with uniform sampling""" + """Get area definition with uniform sampling.""" logger.debug('Computing area definition') if lon0 is not None: @@ -730,7 +733,7 @@ def _get_area_def_uniform_sampling(self, lon0, channel): @property def start_time(self): - """Start timestamp of the dataset""" + """Start timestamp of the dataset.""" dt = self.nc['time'].dt return datetime(year=dt.year, month=dt.month, day=dt.day, hour=dt.hour, minute=dt.minute, @@ -738,7 +741,7 @@ def start_time(self): @property def end_time(self): - """End timestamp of the dataset""" + """End timestamp of the dataset.""" try: return self.start_time + SCAN_DURATION[self.sector] except KeyError: @@ -761,7 +764,7 @@ def resolution(self): return 1000. * self.nc['lineRes'].values def get_shape(self, key, info): - """Get the shape of the data + """Get the shape of the data. Returns: Number of lines, number of columns @@ -770,7 +773,7 @@ def get_shape(self, key, info): @property def meta(self): - """Derive metadata from the coordinates""" + """Derive metadata from the coordinates.""" # Use buffered data if available if self._meta is None: lat = self.geo_data['lat'] @@ -797,7 +800,7 @@ def meta(self): return self._meta def _counts2radiance(self, counts, coefs, channel): - """Convert raw detector counts to radiance""" + """Convert raw detector counts to radiance.""" logger.debug('Converting counts to radiance') if self._is_vis(channel): @@ -812,7 +815,7 @@ def _counts2radiance(self, counts, coefs, channel): offset=coefs['offset']) def _calibrate(self, radiance, coefs, channel, calibration): - """Convert radiance to reflectance or brightness temperature""" + """Convert radiance to reflectance or brightness temperature.""" if self._is_vis(channel): if not calibration == 'reflectance': raise ValueError('Cannot calibrate VIS channel to ' @@ -834,7 +837,7 @@ def _calibrate(self, radiance, coefs, channel, calibration): @staticmethod def _ircounts2radiance(counts, scale, offset): - """Convert IR counts to radiance + """Convert IR counts to radiance. Reference: [IR]. @@ -851,7 +854,7 @@ def _ircounts2radiance(counts, scale, offset): @staticmethod def _calibrate_ir(radiance, coefs): - """Convert IR radiance to brightness temperature + """Convert IR radiance to brightness temperature. Reference: [IR] @@ -880,7 +883,7 @@ def _calibrate_ir(radiance, coefs): @staticmethod def _viscounts2radiance(counts, slope, offset): - """Convert VIS counts to radiance + """Convert VIS counts to radiance. References: [VIS] @@ -896,7 +899,7 @@ def _viscounts2radiance(counts, slope, offset): @staticmethod def _calibrate_vis(radiance, k): - """Convert VIS radiance to reflectance + """Convert VIS radiance to reflectance. Note: Angle of incident radiation and annual variation of the earth-sun distance is not taken into account. 
A value of 100% @@ -922,7 +925,7 @@ def _calibrate_vis(radiance, k): return refl.clip(min=0) def _update_metadata(self, data, ds_info): - """Update metadata of the given DataArray""" + """Update metadata of the given DataArray.""" # Metadata from the dataset definition data.attrs.update(ds_info) @@ -957,6 +960,7 @@ def _update_metadata(self, data, ds_info): ) def __del__(self): + """Delete.""" try: self.nc.close() except (AttributeError, IOError, OSError): @@ -973,7 +977,6 @@ def available_datasets(self, configured_datasets=None): See :meth:`satpy.readers.file_handlers.BaseFileHandler.available_datasets` for details. - """ res = self.resolution # update previously configured datasets @@ -991,7 +994,7 @@ def available_datasets(self, configured_datasets=None): class GOESNCFileHandler(GOESNCBaseFileHandler): - """File handler for GOES Imager data in netCDF format""" + """File handler for GOES Imager data in netCDF format.""" vis_sectors = VIS_SECTORS ir_sectors = IR_SECTORS @@ -1002,19 +1005,19 @@ def __init__(self, filename, filename_info, filetype_info): filetype_info) def get_dataset(self, key, info): - """Load dataset designated by the given key from file""" - logger.debug('Reading dataset {}'.format(key.name)) + """Load dataset designated by the given key from file.""" + logger.debug('Reading dataset {}'.format(key['name'])) # Read data from file and calibrate if necessary - if 'longitude' in key.name: + if 'longitude' in key['name']: data = self.geo_data['lon'] - elif 'latitude' in key.name: + elif 'latitude' in key['name']: data = self.geo_data['lat'] else: tic = datetime.now() data = self.calibrate(self.nc['data'].isel(time=0), - calibration=key.calibration, - channel=key.name) + calibration=key['calibration'], + channel=key['name']) logger.debug('Calibration time: {}'.format(datetime.now() - tic)) # Mask space pixels @@ -1029,7 +1032,7 @@ def get_dataset(self, key, info): return data def calibrate(self, counts, calibration, channel): - """Perform calibration""" + """Perform calibration.""" # Convert 16bit counts from netCDF4 file to the original 10bit # GVAR counts by dividing by 32. See [FAQ]. counts = counts / 32. @@ -1052,7 +1055,7 @@ def calibrate(self, counts, calibration, channel): class GOESEUMNCFileHandler(GOESNCBaseFileHandler): - """File handler for GOES Imager data in EUM netCDF format + """File handler for GOES Imager data in EUM netCDF format. 
TODO: Remove datasets which are not available in the file (counts, VIS radiance) via available_datasets() -> See #434 @@ -1067,13 +1070,13 @@ def __init__(self, filename, filename_info, filetype_info, geo_data): filetype_info, geo_data) def get_dataset(self, key, info): - """Load dataset designated by the given key from file""" - logger.debug('Reading dataset {}'.format(key.name)) + """Load dataset designated by the given key from file.""" + logger.debug('Reading dataset {}'.format(key['name'])) tic = datetime.now() data = self.calibrate(self.nc['data'].isel(time=0), - calibration=key.calibration, - channel=key.name) + calibration=key['calibration'], + channel=key['name']) logger.debug('Calibration time: {}'.format(datetime.now() - tic)) # Mask space pixels @@ -1089,7 +1092,7 @@ def get_dataset(self, key, info): return data def calibrate(self, data, calibration, channel): - """Perform calibration""" + """Perform calibration.""" coefs = CALIB_COEFS[self.platform_name][channel] is_vis = self._is_vis(channel) @@ -1107,7 +1110,8 @@ def calibrate(self, data, calibration, channel): class GOESEUMGEONCFileHandler(BaseFileHandler): - """File handler for GOES Geolocation data in EUM netCDF format""" + """File handler for GOES Geolocation data in EUM netCDF format.""" + def __init__(self, filename, filename_info, filetype_info): """Initialize the reader.""" super(GOESEUMGEONCFileHandler, self).__init__(filename, filename_info, @@ -1125,19 +1129,20 @@ def __init__(self, filename, filename_info, filetype_info): self._meta = None def __getitem__(self, item): + """Get item.""" return getattr(self.nc, item) def get_dataset(self, key, info): - """Load dataset designated by the given key from file""" - logger.debug('Reading dataset {}'.format(key.name)) + """Load dataset designated by the given key from file.""" + logger.debug('Reading dataset {}'.format(key['name'])) # Read data from file and calibrate if necessary - if 'longitude' in key.name: + if 'longitude' in key['name']: data = self.nc['lon'] - elif 'latitude' in key.name: + elif 'latitude' in key['name']: data = self.nc['lat'] else: - raise KeyError("Unknown dataset: {}".format(key.name)) + raise KeyError("Unknown dataset: {}".format(key['name'])) # Set proper dimension names data = data.rename({'xc': 'x', 'yc': 'y'}) @@ -1156,7 +1161,7 @@ def resolution(self): class GOESCoefficientReader(object): - """Read GOES Imager calibration coefficients from NOAA reference HTMLs""" + """Read GOES Imager calibration coefficients from NOAA reference HTMLs.""" gvar_channels = { 'GOES-8': {'00_7': 1, '03_9': 2, '06_8': 3, '10_7': 4, '12_0': 5}, @@ -1192,6 +1197,7 @@ class GOESCoefficientReader(object): } def __init__(self, ir_url, vis_url): + """Init the coef reader.""" from bs4 import BeautifulSoup self.ir_html = BeautifulSoup(self._load_url_or_file(ir_url), features="html5lib") @@ -1216,6 +1222,7 @@ def _load_url_or_file(self, url): raise ValueError('Invalid URL or file: {}'.format(url)) def get_coefs(self, platform, channel): + """Get the coefs.""" if channel == '00_7': return self._get_vis_coefs(platform=platform) @@ -1308,7 +1315,7 @@ def _denoise(self, string): return string.replace('\n', '').replace(' ', '') def _float(self, string): - """Convert string to float + """Convert string to float. Take care of numbers in exponential format """ @@ -1325,7 +1332,7 @@ def _float(self, string): def test_coefs(ir_url, vis_url): - """Test calibration coefficients against NOAA reference pages + """Test calibration coefficients against NOAA reference pages. 
Currently the reference pages are: diff --git a/satpy/readers/gpm_imerg.py b/satpy/readers/gpm_imerg.py index 9101c7a307..fb08d354ef 100644 --- a/satpy/readers/gpm_imerg.py +++ b/satpy/readers/gpm_imerg.py @@ -65,7 +65,7 @@ def end_time(self): def get_dataset(self, dataset_id, ds_info): """Load a dataset.""" - file_key = ds_info.get('file_key', dataset_id.name) + file_key = ds_info.get('file_key', dataset_id['name']) dsname = 'Grid/' + file_key data = self[dsname].squeeze().transpose() data.values = np.flipud(data.values) diff --git a/satpy/readers/grib.py b/satpy/readers/grib.py index 9a20a185d1..d243938743 100644 --- a/satpy/readers/grib.py +++ b/satpy/readers/grib.py @@ -30,8 +30,9 @@ from pyresample import geometry from datetime import datetime -from satpy import DatasetID, CHUNK_SIZE +from satpy import CHUNK_SIZE from satpy.readers.file_handlers import BaseFileHandler +from satpy.dataset import DataQuery import pygrib LOG = logging.getLogger(__name__) @@ -43,8 +44,10 @@ class GRIBFileHandler(BaseFileHandler): + """File handler for grib files.""" def __init__(self, filename, filename_info, filetype_info): + """Init the file handler.""" super(GRIBFileHandler, self).__init__(filename, filename_info, filetype_info) self._msg_datasets = {} @@ -73,8 +76,9 @@ def __init__(self, filename, filename_info, filetype_info): def _analyze_messages(self, grib_file): grib_file.seek(0) for idx, msg in enumerate(grib_file): - msg_id = DatasetID(name=msg['shortName'], - level=msg['level']) + msg_id = DataQuery(name=msg['shortName'], + level=msg['level'], + modifiers=tuple()) ds_info = { 'message': idx + 1, 'name': msg['shortName'], @@ -90,7 +94,7 @@ def _create_dataset_ids(self, keys): id_keys = [keys[k]['id_key'] for k in ordered_keys] msg_info = dict(zip(ordered_keys, id_vals)) ds_info = dict(zip(id_keys, id_vals)) - msg_id = DatasetID(**ds_info) + msg_id = DataQuery(**ds_info) ds_info = msg_id.to_dict() ds_info.update(msg_info) ds_info['file_type'] = self.filetype_info['file_type'] @@ -120,7 +124,7 @@ def end_time(self): return self._end_time def available_datasets(self, configured_datasets=None): - """Automatically determine datasets provided by this file""" + """Automatically determine datasets provided by this file.""" # previously configured or provided datasets # we can't provide any additional information for is_avail, ds_info in (configured_datasets or []): @@ -219,6 +223,7 @@ def get_area_def(self, dsid): raise RuntimeError("Unknown GRIB projection information") def get_metadata(self, msg, ds_info): + """Get metadata.""" model_time = self._convert_datetime(msg, 'dataDate', 'dataTime') start_time = self._convert_datetime(msg, 'validityDate', diff --git a/satpy/readers/hdfeos_base.py b/satpy/readers/hdfeos_base.py index f715c2714b..7dd93073fe 100644 --- a/satpy/readers/hdfeos_base.py +++ b/satpy/readers/hdfeos_base.py @@ -256,9 +256,9 @@ def get_dataset(self, dataset_keys, dataset_info): # Name of the dataset as it appears in the HDF EOS file in_file_dataset_name = dataset_info.get('file_key') # Name of the dataset in the YAML file - dataset_name = dataset_keys.name + dataset_name = dataset_keys['name'] # Resolution asked - resolution = dataset_keys.resolution + resolution = dataset_keys['resolution'] if in_file_dataset_name is not None: # if the YAML was configured with a specific name use that data = self.load_dataset(in_file_dataset_name) diff --git a/satpy/readers/hrit_jma.py b/satpy/readers/hrit_jma.py index 12294e3e1d..de24f4e6be 100644 --- a/satpy/readers/hrit_jma.py +++ 
b/satpy/readers/hrit_jma.py @@ -19,7 +19,6 @@ Introduction ------------ - The JMA HRIT format is described in the `JMA HRIT - Mission Specific Implementation`_. There are three readers for this format in Satpy: @@ -37,7 +36,6 @@ Example ------- - Here is an example how to read Himwari-8 HRIT data with Satpy: .. code-block:: python @@ -179,7 +177,6 @@ def mjd2datetime64(mjd): """Convert Modified Julian Day (MJD) to datetime64.""" - epoch = np.datetime64('1858-11-17 00:00') day2usec = 24 * 3600 * 1E6 mjd_usec = (mjd * day2usec).astype(np.int64).astype('timedelta64[us]') @@ -368,7 +365,8 @@ def _mask_space(self, data): return data.where(geomask) def _get_acq_time(self): - """ + r"""Get the acquisition times from the file. + Acquisition times for a subset of scanlines are stored in the header as follows: diff --git a/satpy/readers/hrpt.py b/satpy/readers/hrpt.py index 5b178c4244..ffe6b4c471 100644 --- a/satpy/readers/hrpt.py +++ b/satpy/readers/hrpt.py @@ -63,8 +63,7 @@ def time_seconds(tc_array, year): - """Return the time object from the timecodes - """ + """Return the time object from the timecodes.""" tc_array = np.array(tc_array, copy=True) word = tc_array[:, 0] day = word >> 1 @@ -82,8 +81,7 @@ def time_seconds(tc_array, year): def bfield(array, bit): - """return the bit array. - """ + """Return the bit array.""" return (array & 2**(9 - bit + 1)).astype(np.bool) @@ -91,6 +89,7 @@ def bfield(array, bit): def geo_interpolate(lons32km, lats32km): + """Interpolate geo data.""" from geotiepoints import SatelliteInterpolator cols32km = np.arange(0, 2048, 32) cols1km = np.arange(2048) @@ -109,10 +108,10 @@ def geo_interpolate(lons32km, lats32km): class HRPTFile(BaseFileHandler): - """Reader for HRPT Minor Frame, 10 bits data expanded to 16 bits. - """ + """Reader for HRPT Minor Frame, 10 bits data expanded to 16 bits.""" def __init__(self, filename, filename_info, filetype_info): + """Init the file handler.""" super(HRPTFile, self).__init__(filename, filename_info, filetype_info) self.channels = {i: None for i in AVHRR_CHANNEL_NAMES} self.units = {i: 'counts' for i in AVHRR_CHANNEL_NAMES} @@ -131,7 +130,7 @@ def __init__(self, filename, filename_info, filetype_info): self.read() def read(self): - + """Read the file.""" with open(self.filename, "rb") as fp_: self._data = np.memmap(fp_, dtype=dtype, mode="r") if np.all(self._data['frame_sync'][0] > 1024): @@ -140,12 +139,13 @@ def read(self): (self._data["id"]["id"][0] >> 3) & 15] def get_dataset(self, key, info): + """Get the dataset.""" if self._data is None: self.read() - if key.name in ['latitude', 'longitude']: + if key['name'] in ['latitude', 'longitude']: lons, lats = self.get_lonlats() - if key.name == 'latitude': + if key['name'] == 'latitude': return Dataset(lats, id=key) else: return Dataset(lons, id=key) @@ -156,19 +156,19 @@ def get_dataset(self, key, info): '3b': 2, '4': 3, '5': 4} - index = avhrr_channel_index[key.name] + index = avhrr_channel_index[key['name']] mask = False - if key.name in ['3a', '3b'] and self._is3b is None: + if key['name'] in ['3a', '3b'] and self._is3b is None: ch3a = bfield(self._data["id"]["id"], 10) self._is3b = np.logical_not(ch3a) - if key.name == '3a': + if key['name'] == '3a': mask = np.tile(self._is3b, (1, 2048)) - elif key.name == '3b': + elif key['name'] == '3b': mask = np.tile(np.logical_not(self._is3b), (1, 2048)) data = self._data["image_data"][:, :, index] - if key.calibration == 'counts': + if key['calibration'] == 'counts': return Dataset(data, mask=mask, area=self.get_lonlats(), @@ -178,12 
+178,12 @@ def get_dataset(self, key, info): jdays = (np.datetime64(self.start_time) - np.datetime64(str( self.year) + '-01-01T00:00:00Z')) / np.timedelta64(1, 'D') - if index < 2 or key.name == '3a': + if index < 2 or key['name'] == '3a': data = calibrate_solar(data, index, self.year, jdays, pg_spacecraft) units = '%' - if index > 2 or key.name == '3b': + if index > 2 or key['name'] == '3b': if self.times is None: self.times = time_seconds(self._data["timecode"], self.year) line_numbers = ( @@ -201,6 +201,7 @@ def get_dataset(self, key, info): return Dataset(data, mask=mask, units=units) def get_telemetry(self): + """Get the telemetry.""" prt = np.mean(self._data["telemetry"]['PRT'], axis=1) ict = np.empty((len(self._data), 3)) @@ -215,6 +216,7 @@ def get_telemetry(self): return prt, ict, space def get_lonlats(self): + """Get the lonlats.""" if self.lons is not None and self.lats is not None: return self.lons, self.lats from pyorbital.orbital import Orbital @@ -246,10 +248,12 @@ def get_lonlats(self): @property def start_time(self): + """Get the start time.""" return time_seconds(self._data["timecode"][0, np.newaxis, :], self.year).astype(datetime)[0] @property def end_time(self): + """Get the end time.""" return time_seconds(self._data["timecode"][-1, np.newaxis, :], self.year).astype(datetime)[0] diff --git a/satpy/readers/hsaf_grib.py b/satpy/readers/hsaf_grib.py index dd3c1f25f0..f51036a38f 100644 --- a/satpy/readers/hsaf_grib.py +++ b/satpy/readers/hsaf_grib.py @@ -15,7 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""A reader for files produced by the Hydrology SAF +"""A reader for files produced by the Hydrology SAF. Currently this reader depends on the `pygrib` python package. The `eccodes` package from ECMWF is preferred, but does not support python 3 at the time @@ -42,8 +42,10 @@ class HSAFFileHandler(BaseFileHandler): + """File handler for HSAF grib files.""" def __init__(self, filename, filename_info, filetype_info): + """Init the file handler.""" super(HSAFFileHandler, self).__init__(filename, filename_info, filetype_info) @@ -68,12 +70,11 @@ def _get_datetime(msg): @property def analysis_time(self): - """ - Get validity time of this file - """ + """Get validity time of this file.""" return self._analysis_time def get_metadata(self, msg): + """Get the metadata.""" try: center_description = msg['centreDescription'] except (RuntimeError, KeyError): @@ -92,9 +93,7 @@ def get_metadata(self, msg): return ds_info def get_area_def(self, dsid): - """ - Get area definition for message. - """ + """Get area definition for message.""" msg = self._get_message(1) try: return self._get_area_def(msg) @@ -102,10 +101,7 @@ def get_area_def(self, dsid): raise RuntimeError("Unknown GRIB projection information") def _get_area_def(self, msg): - """ - Get the area definition of the datasets in the file. - """ - + """Get the area definition of the datasets in the file.""" proj_param = msg.projparams.copy() Rx = 2 * np.arcsin(1. 
/ msg['NrInRadiusOfEarth']) / msg['dx'] @@ -141,15 +137,15 @@ def _get_message(self, idx): def get_dataset(self, ds_id, ds_info): """Read a GRIB message into an xarray DataArray.""" - if (ds_id.name not in self.filename): - raise IOError("File does not contain {} data".format(ds_id.name)) + if (ds_id['name'] not in self.filename): + raise IOError("File does not contain {} data".format(ds_id['name'])) msg = self._get_message(1) ds_info = self.get_metadata(msg) ds_info['end_time'] = ds_info['data_time'] - if (ds_id.name == 'h05' or ds_id.name == 'h05B'): + if (ds_id['name'] == 'h05' or ds_id['name'] == 'h05B'): flen = len(self.filename) timedelt = self.filename[flen-10:flen-8] ds_info['start_time'] = (ds_info['end_time'] - diff --git a/satpy/readers/hy2_scat_l2b_h5.py b/satpy/readers/hy2_scat_l2b_h5.py index 4eaf153058..5de9c3822b 100644 --- a/satpy/readers/hy2_scat_l2b_h5.py +++ b/satpy/readers/hy2_scat_l2b_h5.py @@ -14,7 +14,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""HY-2B L2B Reader, distributed by Eumetsat in HDF5 format""" +"""HY-2B L2B Reader, distributed by Eumetsat in HDF5 format.""" import numpy as np import xarray as xr @@ -26,6 +26,7 @@ class HY2SCATL2BH5FileHandler(HDF5FileHandler): + """File handler for HY2 scat.""" @property def start_time(self): @@ -41,10 +42,11 @@ def end_time(self): @property def platform_name(self): - """Platform ShortName""" + """Get the Platform ShortName.""" return self['/attr/Platform_ShortName'] def get_variable_metadata(self): + """Get the variable metadata.""" info = getattr(self, 'attrs', {}) info.update({ "Equator_Crossing_Longitude": self['/attr/Equator_Crossing_Longitude'], @@ -61,6 +63,7 @@ def get_variable_metadata(self): return info def get_metadata(self): + """Get the metadata.""" info = getattr(self, 'attrs', {}) info.update({ "WVC_Size": self['/attr/WVC_Size'], @@ -85,23 +88,24 @@ def get_metadata(self): return info def get_dataset(self, key, info): + """Get the dataset.""" dims = ['y', 'x'] - if self[key.name].ndim == 3: + if self[key['name']].ndim == 3: dims = ['y', 'x', 'selection'] - if key.name in 'wvc_row_time': - data = xr.DataArray(da.from_array(self[key.name][:]), - attrs={'fill_value': self[key.name].attrs['fill_value']}, - name=key.name, + if key['name'] in 'wvc_row_time': + data = xr.DataArray(da.from_array(self[key['name']][:]), + attrs={'fill_value': self[key['name']].attrs['fill_value']}, + name=key['name'], dims=['y', ]) else: - data = xr.DataArray(da.from_array(self[key.name][:], + data = xr.DataArray(da.from_array(self[key['name']][:], chunks=CHUNK_SIZE), - name=key.name, dims=dims) + name=key['name'], dims=dims) - data = self._mask_data(key.name, data) - data = self._scale_data(key.name, data) + data = self._mask_data(key['name'], data) + data = self._scale_data(key['name'], data) - if key.name in 'wvc_lon': + if key['name'] in 'wvc_lon': data = xr.where(data > 180, data - 360., data) data.attrs.update(info) data.attrs.update(self.get_metadata()) diff --git a/satpy/readers/iasi_l2.py b/satpy/readers/iasi_l2.py index 291c2eda0e..37cc787e70 100644 --- a/satpy/readers/iasi_l2.py +++ b/satpy/readers/iasi_l2.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# Copyright (c) 2017 Satpy developers +# Copyright (c) 2017-2020 Satpy developers # # This file is part of satpy. # @@ -15,8 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""IASI L2 HDF5 files. 
-""" +"""IASI L2 HDF5 files.""" import h5py import numpy as np @@ -75,10 +74,10 @@ class IASIL2HDF5(BaseFileHandler): - """File handler for IASI L2 HDF5 files.""" def __init__(self, filename, filename_info, filetype_info): + """Init the file handler.""" super(IASIL2HDF5, self).__init__(filename, filename_info, filetype_info) @@ -94,10 +93,12 @@ def __init__(self, filename, filename_info, filetype_info): @property def start_time(self): + """Get the start time.""" return self.finfo['start_time'] @property def end_time(self): + """Get the end time.""" end_time = dt.datetime.combine(self.start_time.date(), self.finfo['end_time'].time()) if end_time < self.start_time: @@ -105,10 +106,10 @@ def end_time(self): return end_time def get_dataset(self, key, info): - """Load a dataset""" + """Load a dataset.""" with h5py.File(self.filename, 'r') as fid: - LOGGER.debug('Reading %s.', key.name) - if key.name in DSET_NAMES: + LOGGER.debug('Reading %s.', key['name']) + if key['name'] in DSET_NAMES: m_data = read_dataset(fid, key) else: m_data = read_geo(fid, key) @@ -119,15 +120,15 @@ def get_dataset(self, key, info): def read_dataset(fid, key): - """Read dataset""" - dsid = DSET_NAMES[key.name] + """Read dataset.""" + dsid = DSET_NAMES[key['name']] dset = fid["/PWLR/" + dsid] if dset.ndim == 3: dims = ['y', 'x', 'level'] else: dims = ['y', 'x'] - data = xr.DataArray(da.from_array(dset.value, chunks=CHUNK_SIZE), - name=key.name, dims=dims).astype(np.float32) + data = xr.DataArray(da.from_array(dset[()], chunks=CHUNK_SIZE), + name=key['name'], dims=dims).astype(np.float32) data = xr.where(data > 1e30, np.nan, data) dset_attrs = dict(dset.attrs) @@ -138,19 +139,19 @@ def read_dataset(fid, key): def read_geo(fid, key): """Read geolocation and related datasets.""" - dsid = GEO_NAMES[key.name] + dsid = GEO_NAMES[key['name']] add_epoch = False - if "time" in key.name: - days = fid["/L1C/" + dsid["day"]].value - msecs = fid["/L1C/" + dsid["msec"]].value + if "time" in key['name']: + days = fid["/L1C/" + dsid["day"]][()] + msecs = fid["/L1C/" + dsid["msec"]][()] data = _form_datetimes(days, msecs) add_epoch = True dtype = np.float64 else: - data = fid["/L1C/" + dsid].value + data = fid["/L1C/" + dsid][()] dtype = np.float32 data = xr.DataArray(da.from_array(data, chunks=CHUNK_SIZE), - name=key.name, dims=['y', 'x']).astype(dtype) + name=key['name'], dims=['y', 'x']).astype(dtype) if add_epoch: data.attrs['sensing_time_epoch'] = EPOCH @@ -160,7 +161,6 @@ def read_geo(fid, key): def _form_datetimes(days, msecs): """Calculate seconds since EPOCH from days and milliseconds for each of IASI scan.""" - all_datetimes = [] for i in range(days.size): day = int(days[i]) @@ -169,7 +169,7 @@ def _form_datetimes(days, msecs): for j in range(int(VALUES_PER_SCAN_LINE / 4)): usec = 1000 * (j * VIEW_TIME_ADJUSTMENT + msec) delta = (dt.timedelta(days=day, microseconds=usec)) - for k in range(4): + for _k in range(4): scanline_datetimes.append(delta.total_seconds()) all_datetimes.append(scanline_datetimes) diff --git a/satpy/readers/li_l2.py b/satpy/readers/li_l2.py index 09be2f1fa1..ae799f49bd 100644 --- a/satpy/readers/li_l2.py +++ b/satpy/readers/li_l2.py @@ -75,11 +75,11 @@ def get_dataset(self, key, info=None, out=None): "lfl": "radiance"} # Get lightning data out of NetCDF container - logger.debug("Key: {}".format(key.name)) + logger.debug("Key: {}".format(key['name'])) # Create reference grid grid = np.full((self.nlines, self.ncols), np.NaN) # Get product values - values = self.nc[typedict[key.name]] + values = 
self.nc[typedict[key['name']]] rows = self.nc['row'] cols = self.nc['column'] logger.debug('[ Number of values ] : {}'.format((len(values)))) diff --git a/satpy/readers/maia.py b/satpy/readers/maia.py index d84078d41a..5a45170d4f 100644 --- a/satpy/readers/maia.py +++ b/satpy/readers/maia.py @@ -39,8 +39,10 @@ class MAIAFileHandler(BaseFileHandler): + """File handler for Maia files.""" def __init__(self, filename, filename_info, filetype_info): + """Init the file handler.""" super(MAIAFileHandler, self).__init__( filename, filename_info, filetype_info) self.finfo = filename_info @@ -57,6 +59,7 @@ def __init__(self, filename, filename_info, filetype_info): self.read(self.filename) def read(self, filename): + """Read the file.""" self.h5 = h5py.File(filename, 'r') missing = -9999. self.Lat = da.from_array(self.h5[u'DATA/Latitude'], chunks=CHUNK_SIZE) / 10000. @@ -105,6 +108,7 @@ def read(self, filename): self.file_content['ct'] = classif.astype(np.uint8) def get_platform(self, platform): + """Get the platform.""" if self.file_content['sat_id'] in (14,): return "viirs" else: @@ -112,26 +116,27 @@ def get_platform(self, platform): @property def start_time(self): + """Get the start time.""" return self.finfo['start_time'] @property def end_time(self): + """Get the end time.""" return self.finfo['end_time'] def get_dataset(self, key, info, out=None): """Get a dataset from the file.""" - - logger.debug("Reading %s.", key.name) - values = self.file_content[key.name] + logger.debug("Reading %s.", key['name']) + values = self.file_content[key['name']] selected = np.array(self.selected) - if key.name in ("Latitude", "Longitude"): + if key['name'] in ("Latitude", "Longitude"): values = values / 10000. - if key.name in ('Tsurf', 'CloudTopPres', 'CloudTopTemp'): + if key['name'] in ('Tsurf', 'CloudTopPres', 'CloudTopTemp'): goods = values > -9998. selected = np.array(selected & goods) - if key.name in ('Tsurf', "Alt_surface", "CloudTopTemp"): + if key['name'] in ('Tsurf', "Alt_surface", "CloudTopTemp"): values = values / 100. - if key.name in ("CloudTopPres"): + if key['name'] in ("CloudTopPres"): values = values / 10. else: selected = self.selected @@ -139,7 +144,7 @@ def get_dataset(self, key, info, out=None): fill_value = np.nan - if key.name == 'ct': + if key['name'] == 'ct': fill_value = 0 info['_FillValue'] = 0 ds = DataArray(values, dims=['y', 'x'], attrs=info).where(selected, fill_value) diff --git a/satpy/readers/mersi2_l1b.py b/satpy/readers/mersi2_l1b.py index 20328f534a..ebb377bf14 100644 --- a/satpy/readers/mersi2_l1b.py +++ b/satpy/readers/mersi2_l1b.py @@ -78,7 +78,7 @@ def _get_coefficients(self, cal_key, cal_index): def get_dataset(self, dataset_id, ds_info): """Load data variable and metadata and calibrate if needed.""" - file_key = ds_info.get('file_key', dataset_id.name) + file_key = ds_info.get('file_key', dataset_id['name']) band_index = ds_info.get('band_index') data = self[file_key] if band_index is not None: @@ -92,7 +92,7 @@ def get_dataset(self, dataset_id, ds_info): fill_value = attrs.pop('FillValue', np.nan) # covered by valid_range valid_range = attrs.pop('valid_range', None) - if dataset_id.calibration == 'counts': + if dataset_id['calibration'] == 'counts': # preserve integer type of counts if possible attrs['_FillValue'] = fill_value new_fill = fill_value @@ -101,7 +101,7 @@ def get_dataset(self, dataset_id, ds_info): if valid_range is not None: # Due to a bug in the valid_range upper limit in the 10.8(24) and 12.0(25) # in the HDF data, this is hardcoded here. 
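Note: the recurring change in these reader hunks is that dataset ids are now indexed like mappings (``key['name']``, ``key['calibration']``) rather than accessed as attributes (``key.name``). A minimal sketch of the new access pattern, using a plain dict as a stand-in for the ``DataID`` object Satpy actually passes to ``get_dataset``:

.. code-block:: python

    # A plain dict stands in for satpy's DataID here; the real ids are created
    # by Satpy itself and behave like read-only mappings.
    dataset_id = {'name': '4', 'calibration': 'brightness_temperature', 'resolution': 1000}

    def get_dataset_sketch(dataset_id, ds_info):
        # old style was dataset_id.name / dataset_id.calibration
        name = dataset_id['name']
        if dataset_id.get('calibration') == 'counts':
            return 'raw counts for ' + name
        return 'calibrated data for ' + name

    print(get_dataset_sketch(dataset_id, {}))  # calibrated data for 4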
- if dataset_id.name in ['24', '25'] and valid_range[1] == 4095: + if dataset_id['name'] in ['24', '25'] and valid_range[1] == 4095: valid_range[1] = 25000 # typically bad_values == 65535, saturated == 65534 # dead detector == 65533 @@ -110,20 +110,20 @@ def get_dataset(self, dataset_id, ds_info): slope = attrs.pop('Slope', None) intercept = attrs.pop('Intercept', None) - if slope is not None and dataset_id.calibration != 'counts': + if slope is not None and dataset_id['calibration'] != 'counts': if band_index is not None: slope = slope[band_index] intercept = intercept[band_index] data = data * slope + intercept - if dataset_id.calibration == "reflectance": + if dataset_id['calibration'] == "reflectance": # some bands have 0 counts for the first N columns and # seem to be invalid data points data = data.where(data != 0) coeffs = self._get_coefficients(ds_info['calibration_key'], ds_info['calibration_index']) data = coeffs[0] + coeffs[1] * data + coeffs[2] * data**2 - elif dataset_id.calibration == "brightness_temperature": + elif dataset_id['calibration'] == "brightness_temperature": cal_index = ds_info['calibration_index'] # Apparently we don't use these calibration factors for Rad -> BT # coeffs = self._get_coefficients(ds_info['calibration_key'], cal_index) @@ -137,7 +137,7 @@ def get_dataset(self, dataset_id, ds_info): # Converts um^-1 (wavenumbers) and (mW/m^2)/(str/cm^-1) (radiance data) # to SI units m^-1, mW*m^-3*str^-1. - wave_number = 1. / (dataset_id.wavelength[1] / 1e6) + wave_number = 1. / (dataset_id['wavelength'][1] / 1e6) # pass the dask array bt_data = rad2temp(wave_number, data.data * 1e-5) # brightness temperature if isinstance(bt_data, np.ndarray): diff --git a/satpy/readers/mimic_TPW2_nc.py b/satpy/readers/mimic_TPW2_nc.py index 62a773db42..8060b03915 100644 --- a/satpy/readers/mimic_TPW2_nc.py +++ b/satpy/readers/mimic_TPW2_nc.py @@ -103,8 +103,8 @@ def available_datasets(self, configured_datasets=None): def get_dataset(self, ds_id, info): """Load dataset designated by the given key from file.""" - logger.debug("Getting data for: %s", ds_id.name) - file_key = info.get('file_key', ds_id.name) + logger.debug("Getting data for: %s", ds_id['name']) + file_key = info.get('file_key', ds_id['name']) data = np.flipud(self[file_key]) data = xr.DataArray(data, dims=['y', 'x']) data.attrs = self.get_metadata(data, info) diff --git a/satpy/readers/modis_l1b.py b/satpy/readers/modis_l1b.py index 196e9f6396..a4e888c612 100644 --- a/satpy/readers/modis_l1b.py +++ b/satpy/readers/modis_l1b.py @@ -16,7 +16,7 @@ # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""Modis level 1b hdf-eos format reader +"""Modis level 1b hdf-eos format reader. 
Introduction ------------ @@ -63,6 +63,7 @@ class HDFEOSBandReader(HDFEOSBaseFileReader): "H": 500} def __init__(self, filename, filename_info, filetype_info): + """Init the file handler.""" HDFEOSBaseFileReader.__init__(self, filename, filename_info, filetype_info) ds = self.metadata['INVENTORYMETADATA'][ @@ -86,7 +87,7 @@ def get_dataset(self, key, info): info.update({'platform_name': 'EOS-' + platform_name}) info.update({'sensor': 'modis'}) - if self.resolution != key.resolution: + if self.resolution != key['resolution']: return datasets = datadict[self.resolution] @@ -97,7 +98,7 @@ def get_dataset(self, key, info): # get the relative indices of the desired channel try: - index = band_names.index(key.name) + index = band_names.index(key['name']) except ValueError: continue uncertainty = self.sd.select(dataset + "_Uncert_Indexes") @@ -129,21 +130,21 @@ def get_dataset(self, key, info): array = array.where(array <= np.float32(valid_range[1])) array = array.where(from_sds(uncertainty, chunks=CHUNK_SIZE)[index, :, :] < 15) - if key.calibration == 'brightness_temperature': - projectable = calibrate_bt(array, var_attrs, index, key.name) + if key['calibration'] == 'brightness_temperature': + projectable = calibrate_bt(array, var_attrs, index, key['name']) info.setdefault('units', 'K') info.setdefault('standard_name', 'toa_brightness_temperature') - elif key.calibration == 'reflectance': + elif key['calibration'] == 'reflectance': projectable = calibrate_refl(array, var_attrs, index) info.setdefault('units', '%') info.setdefault('standard_name', 'toa_bidirectional_reflectance') - elif key.calibration == 'radiance': + elif key['calibration'] == 'radiance': projectable = calibrate_radiance(array, var_attrs, index) info.setdefault('units', var_attrs.get('radiance_units')) info.setdefault('standard_name', 'toa_outgoing_radiance_per_unit_wavelength') - elif key.calibration == 'counts': + elif key['calibration'] == 'counts': projectable = calibrate_counts(array, var_attrs, index) info.setdefault('units', 'counts') info.setdefault('standard_name', 'counts') # made up @@ -152,8 +153,8 @@ def get_dataset(self, key, info): "key: {}".format(key)) projectable.attrs = info - # if ((platform_name == 'Aqua' and key.name in ["6", "27", "36"]) or - # (platform_name == 'Terra' and key.name in ["29"])): + # if ((platform_name == 'Aqua' and key['name'] in ["6", "27", "36"]) or + # (platform_name == 'Terra' and key['name'] in ["29"])): # height, width = projectable.shape # row_indices = projectable.mask.sum(1) == width # if row_indices.sum() != height: @@ -187,11 +188,13 @@ class MixedHDFEOSReader(HDFEOSGeoReader, HDFEOSBandReader): """A file handler for the files that have both regular bands and geographical information in them.""" def __init__(self, filename, filename_info, filetype_info): + """Init the file handler.""" HDFEOSGeoReader.__init__(self, filename, filename_info, filetype_info) HDFEOSBandReader.__init__(self, filename, filename_info, filetype_info) def get_dataset(self, key, info): - if key.name in HDFEOSGeoReader.DATASET_NAMES: + """Get the dataset.""" + if key['name'] in HDFEOSGeoReader.DATASET_NAMES: return HDFEOSGeoReader.get_dataset(self, key, info) return HDFEOSBandReader.get_dataset(self, key, info) diff --git a/satpy/readers/modis_l2.py b/satpy/readers/modis_l2.py index 3591643c3c..cf8c1914ce 100644 --- a/satpy/readers/modis_l2.py +++ b/satpy/readers/modis_l2.py @@ -91,7 +91,7 @@ def _parse_resolution_info(self, info, resolution): def get_dataset(self, dataset_id, dataset_info): """Get 
DataArray for specified dataset.""" - dataset_name = dataset_id.name + dataset_name = dataset_id['name'] if dataset_name in HDFEOSGeoReader.DATASET_NAMES: return HDFEOSGeoReader.get_dataset(self, dataset_id, dataset_info) dataset_name_in_file = dataset_info['file_key'] @@ -101,11 +101,11 @@ def get_dataset(self, dataset_id, dataset_info): byte_dimension = dataset_info['byte_dimension'] # Where the information is stored dataset = self._select_hdf_dataset(dataset_name_in_file, byte_dimension) - byte_information = self._parse_resolution_info(dataset_info['byte'], dataset_id.resolution) + byte_information = self._parse_resolution_info(dataset_info['byte'], dataset_id['resolution']) # At which bit starts the information - bit_start = self._parse_resolution_info(dataset_info['bit_start'], dataset_id.resolution) + bit_start = self._parse_resolution_info(dataset_info['bit_start'], dataset_id['resolution']) # How many bits store the information - bit_count = self._parse_resolution_info(dataset_info['bit_count'], dataset_id.resolution) + bit_count = self._parse_resolution_info(dataset_info['bit_count'], dataset_id['resolution']) # Only one byte: select the byte information if isinstance(byte_information, int): @@ -132,13 +132,12 @@ def get_dataset(self, dataset_id, dataset_info): # Apply quality assurance filter if 'quality_assurance' in dataset_info: quality_assurance_required = self._parse_resolution_info( - dataset_info['quality_assurance'], dataset_id.resolution + dataset_info['quality_assurance'], dataset_id['resolution'] ) if quality_assurance_required is True: # Get quality assurance dataset recursively - from satpy import DatasetID - quality_assurance_dataset_id = DatasetID( - name='quality_assurance', resolution=1000 + quality_assurance_dataset_id = dataset_id.from_dict( + dict(name='quality_assurance', resolution=1000) ) quality_assurance_dataset_info = { 'name': 'quality_assurance', diff --git a/satpy/readers/msi_safe.py b/satpy/readers/msi_safe.py index c9870cac1c..0d5a727ef2 100644 --- a/satpy/readers/msi_safe.py +++ b/satpy/readers/msi_safe.py @@ -55,10 +55,10 @@ def __init__(self, filename, filename_info, filetype_info, mda): def get_dataset(self, key, info): """Load a dataset.""" - if self._channel != key.name: + if self._channel != key['name']: return - logger.debug('Reading %s.', key.name) + logger.debug('Reading %s.', key['name']) # FIXME: get this from MTD_MSIL1C.xml quantification_value = 10000. 
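The ``modis_l2`` hunk above replaces direct construction of the removed ``DatasetID`` with ``dataset_id.from_dict(...)``, so the derived id reuses whatever identification keys the current reader is configured with. A rough stand-in illustrating the idea (``ToyID`` is illustrative only, not Satpy code):

.. code-block:: python

    # ToyID only mimics the behaviour relied on above: deriving a related id
    # from an existing one instead of instantiating an id class directly.
    class ToyID(dict):
        def from_dict(self, d):
            # the real DataID also reapplies its configured id keys here
            return ToyID(d)

    band_id = ToyID(name='1', resolution=1000)
    qa_id = band_id.from_dict(dict(name='quality_assurance', resolution=1000))
    print(qa_id)  # {'name': 'quality_assurance', 'resolution': 1000}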
jp2 = glymur.Jp2k(self.filename) @@ -96,7 +96,7 @@ def end_time(self): def get_area_def(self, dsid): """Get the area def.""" - if self._channel != dsid.name: + if self._channel != dsid['name']: return return self._mda.get_area_def(dsid) @@ -132,9 +132,9 @@ def get_area_def(self, dsid): CRS = None geocoding = self.root.find('.//Tile_Geocoding') epsg = geocoding.find('HORIZONTAL_CS_CODE').text - rows = int(geocoding.find('Size[@resolution="' + str(dsid.resolution) + '"]/NROWS').text) - cols = int(geocoding.find('Size[@resolution="' + str(dsid.resolution) + '"]/NCOLS').text) - geoposition = geocoding.find('Geoposition[@resolution="' + str(dsid.resolution) + '"]') + rows = int(geocoding.find('Size[@resolution="' + str(dsid['resolution']) + '"]/NROWS').text) + cols = int(geocoding.find('Size[@resolution="' + str(dsid['resolution']) + '"]/NCOLS').text) + geoposition = geocoding.find('Geoposition[@resolution="' + str(dsid['resolution']) + '"]') ulx = float(geoposition.find('ULX').text) uly = float(geoposition.find('ULY').text) xdim = float(geoposition.find('XDIM').text) @@ -212,7 +212,7 @@ def get_dataset(self, key, info): darr = darr.ffill('x') angles = darr.data - res = self.interpolate_angles(angles, key.resolution) + res = self.interpolate_angles(angles, key['resolution']) proj = DataArray(res, dims=['y', 'x']) proj.attrs = info.copy() diff --git a/satpy/readers/netcdf_utils.py b/satpy/readers/netcdf_utils.py index 2a18d4c26b..54bc332f20 100644 --- a/satpy/readers/netcdf_utils.py +++ b/satpy/readers/netcdf_utils.py @@ -121,7 +121,7 @@ def __init__(self, filename, filename_info, filetype_info, self._xarray_kwargs.setdefault('mask_and_scale', self.auto_maskandscale) def __del__(self): - """Delete object.""" + """Delete the file handler.""" if self.file_handle is not None: try: self.file_handle.close() diff --git a/satpy/readers/nucaps.py b/satpy/readers/nucaps.py index e3f96fff25..485613d35b 100644 --- a/satpy/readers/nucaps.py +++ b/satpy/readers/nucaps.py @@ -142,7 +142,7 @@ def sensor_names(self): def get_shape(self, ds_id, ds_info): """Return data array shape for item specified.""" - var_path = ds_info.get('file_key', '{}'.format(ds_id.name)) + var_path = ds_info.get('file_key', '{}'.format(ds_id['name'])) if var_path + '/shape' not in self: # loading a scalar value shape = 1 @@ -156,7 +156,7 @@ def get_shape(self, ds_id, ds_info): def get_metadata(self, dataset_id, ds_info): """Get metadata.""" - var_path = ds_info.get('file_key', '{}'.format(dataset_id.name)) + var_path = ds_info.get('file_key', '{}'.format(dataset_id['name'])) shape = self.get_shape(dataset_id, ds_info) file_units = ds_info.get('file_units', self.get(var_path + '/attr/units')) @@ -178,7 +178,7 @@ def get_metadata(self, dataset_id, ds_info): if 'standard_name' not in info: sname_path = var_path + '/attr/standard_name' info['standard_name'] = self.get(sname_path) - if dataset_id.name != 'Quality_Flag': + if dataset_id['name'] != 'Quality_Flag': anc_vars = info.get('ancillary_variables', []) if 'Quality_Flag' not in anc_vars: anc_vars.append('Quality_Flag') @@ -187,7 +187,7 @@ def get_metadata(self, dataset_id, ds_info): def get_dataset(self, dataset_id, ds_info): """Load data array and metadata for specified dataset.""" - var_path = ds_info.get('file_key', '{}'.format(dataset_id.name)) + var_path = ds_info.get('file_key', '{}'.format(dataset_id['name'])) metadata = self.get_metadata(dataset_id, ds_info) valid_min, valid_max = self[var_path + '/attr/valid_range'] fill_value = self.get(var_path + '/attr/_FillValue') @@ 
-248,7 +248,7 @@ def __init__(self, config_files, mask_surface=True, mask_quality=True, **kwargs) self.mask_quality = self.info.get('mask_quality', mask_quality) def load_ds_ids_from_config(self): - """Convert config dataset entries to DatasetIDs. + """Convert config dataset entries to DataIDs. Special handling is done to provide level specific datasets for any pressured based datasets. For example, a dataset is @@ -269,12 +269,12 @@ def load_ds_ids_from_config(self): new_info = ds_info.copy() new_info['pressure_level'] = lvl_num new_info['pressure_index'] = idx - new_info['file_key'] = '{}'.format(ds_id.name) - new_info['name'] = ds_id.name + suffix + new_info['file_key'] = '{}'.format(ds_id['name']) + new_info['name'] = ds_id['name'] + suffix new_ds_id = ds_id._replace(name=new_info['name']) new_info['id'] = new_ds_id self.all_ids[new_ds_id] = new_info - self.pressure_dataset_names[ds_id.name].append(new_info['name']) + self.pressure_dataset_names[ds_id['name']].append(new_info['name']) def load(self, dataset_keys, previous_datasets=None, pressure_levels=None): """Load data from one or more set of files. diff --git a/satpy/readers/nwcsaf_msg2013_hdf5.py b/satpy/readers/nwcsaf_msg2013_hdf5.py index fb0610d44a..3ce07d859d 100644 --- a/satpy/readers/nwcsaf_msg2013_hdf5.py +++ b/satpy/readers/nwcsaf_msg2013_hdf5.py @@ -54,21 +54,21 @@ def __init__(self, filename, filename_info, filetype_info): def get_dataset(self, dataset_id, ds_info): """Load a dataset.""" - file_key = ds_info.get('file_key', dataset_id.name) + file_key = ds_info.get('file_key', dataset_id['name']) data = self[file_key] nodata = None if 'SCALING_FACTOR' in data.attrs and 'OFFSET' in data.attrs: dtype = np.dtype(data.data) - if dataset_id.name in ['ctth_alti']: + if dataset_id['name'] in ['ctth_alti']: data.attrs['valid_range'] = (0, 27000) data.attrs['_FillValue'] = np.nan - if dataset_id.name in ['ctth_alti', 'ctth_pres', 'ctth_tempe', 'ctth_effective_cloudiness']: + if dataset_id['name'] in ['ctth_alti', 'ctth_pres', 'ctth_tempe', 'ctth_effective_cloudiness']: dtype = np.dtype('float32') nodata = 255 - if dataset_id.name in ['ct']: + if dataset_id['name'] in ['ct']: data.attrs['valid_range'] = (0, 20) data.attrs['_FillValue'] = 255 # data.attrs['palette_meanings'] = list(range(21)) @@ -90,7 +90,7 @@ def get_dataset(self, dataset_id, ds_info): def get_area_def(self, dsid): """Get the area definition of the datasets in the file.""" - if dsid.name.endswith('_pal'): + if dsid['name'].endswith('_pal'): raise NotImplementedError cfac = self.file_content['/attr/CFAC'] diff --git a/satpy/readers/nwcsaf_nc.py b/satpy/readers/nwcsaf_nc.py index 8d0232d20f..2c707a38de 100644 --- a/satpy/readers/nwcsaf_nc.py +++ b/satpy/readers/nwcsaf_nc.py @@ -103,7 +103,6 @@ def __init__(self, filename, filename_info, filetype_info): def set_platform_and_sensor(self, **kwargs): """Set some metadata: platform_name, sensors, and pps (identifying PPS or Geo).""" - try: # NWCSAF/Geo self.platform_name = PLATFORM_NAMES.get(kwargs['sat_id'], kwargs['sat_id']) @@ -125,7 +124,7 @@ def remove_timedim(self, var): def get_dataset(self, dsid, info): """Load a dataset.""" - dsid_name = dsid.name + dsid_name = dsid['name'] if dsid_name in self.cache: logger.debug('Get the data set from cache: %s.', dsid_name) return self.cache[dsid_name] @@ -141,7 +140,7 @@ def get_dataset(self, dsid, info): # Get full resolution lon,lat from the reduced (tie points) grid self.upsample_geolocation(dsid, info) - return self.cache[dsid.name] + return self.cache[dsid['name']] 
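For the ``msi_safe`` area definition above, the tile geocoding supplies the grid size (``NROWS``/``NCOLS``) and the upper-left corner plus pixel size (``ULX``/``ULY``/``XDIM``/``YDIM``) for the requested ``dsid['resolution']``. A sketch of how such values translate into an area extent, assuming the usual north-up layout where ``YDIM`` is negative (the numbers are made up):

.. code-block:: python

    # Illustrative values only; a real tile provides these per resolution.
    ulx, uly = 399960.0, 1900020.0
    xdim, ydim = 10.0, -10.0
    cols, rows = 10980, 10980

    # (lower-left x, lower-left y, upper-right x, upper-right y)
    area_extent = (ulx, uly + rows * ydim, ulx + cols * xdim, uly)
    print(area_extent)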
return variable @@ -214,13 +213,13 @@ def scale_dataset(self, dsid, variable, info): if 'standard_name' in info: variable.attrs.setdefault('standard_name', info['standard_name']) - if self.sw_version == 'NWC/PPS version v2014' and dsid.name == 'ctth_alti': + if self.sw_version == 'NWC/PPS version v2014' and dsid['name'] == 'ctth_alti': # pps 2014 valid range and palette don't match variable.attrs['valid_range'] = (0., 9000.) - if self.sw_version == 'NWC/PPS version v2014' and dsid.name == 'ctth_alti_pal': + if self.sw_version == 'NWC/PPS version v2014' and dsid['name'] == 'ctth_alti_pal': # pps 2014 palette has the nodata color (black) first variable = variable[1:, :] - if self.sw_version == 'NWC/GEO version v2016' and dsid.name == 'ctth_alti': + if self.sw_version == 'NWC/GEO version v2016' and dsid['name'] == 'ctth_alti': # Geo 2016/18 valid range and palette don't match # Valid range is 0 to 27000 in the file. But after scaling the valid range becomes -2000 to 25000 # This now fixed by the scaling of the valid range above. @@ -261,12 +260,12 @@ def get_area_def(self, dsid): # PPS: raise NotImplementedError - if dsid.name.endswith('_pal'): + if dsid['name'].endswith('_pal'): raise NotImplementedError proj_str, area_extent = self._get_projection() - nlines, ncols = self.nc[dsid.name].shape + nlines, ncols = self.nc[dsid['name']].shape area = get_area_def('some_area_name', "On-the-fly area", diff --git a/satpy/readers/olci_nc.py b/satpy/readers/olci_nc.py index 96a6199a4e..88cca91cb8 100644 --- a/satpy/readers/olci_nc.py +++ b/satpy/readers/olci_nc.py @@ -127,8 +127,8 @@ def end_time(self): def get_dataset(self, key, info): """Load a dataset.""" - logger.debug('Reading %s.', key.name) - variable = self.nc[key.name] + logger.debug('Reading %s.', key['name']) + variable = self.nc[key['name']] return variable @@ -189,14 +189,14 @@ def _get_solar_flux(self, band): def get_dataset(self, key, info): """Load a dataset.""" - if self.channel != key.name: + if self.channel != key['name']: return - logger.debug('Reading %s.', key.name) + logger.debug('Reading %s.', key['name']) radiances = self.nc[self.channel + '_radiance'] - if key.calibration == 'reflectance': - idx = int(key.name[2:]) - 1 + if key['calibration'] == 'reflectance': + idx = int(key['name'][2:]) - 1 sflux = self._get_solar_flux(idx) radiances = radiances / sflux * np.pi * 100 radiances.attrs['units'] = '%' @@ -212,17 +212,17 @@ class NCOLCI2(NCOLCIChannelBase): def get_dataset(self, key, info): """Load a dataset.""" - if self.channel is not None and self.channel != key.name: + if self.channel is not None and self.channel != key['name']: return - logger.debug('Reading %s.', key.name) + logger.debug('Reading %s.', key['name']) if self.channel is not None and self.channel.startswith('Oa'): dataset = self.nc[self.channel + '_reflectance'] else: dataset = self.nc[info['nc_key']] - if key.name == 'wqsf': + if key['name'] == 'wqsf': dataset.attrs['_FillValue'] = 1 - elif key.name == 'mask': + elif key['name'] == 'mask': dataset = self.getbitmask(dataset) dataset.attrs['platform_name'] = self.platform_name @@ -313,27 +313,27 @@ class NCOLCIAngles(NCOLCILowResData): def get_dataset(self, key, info): """Load a dataset.""" - if key.name not in self.datasets: + if key['name'] not in self.datasets: return self._open_dataset() - logger.debug('Reading %s.', key.name) + logger.debug('Reading %s.', key['name']) - if self._need_interpolation() and self.cache.get(key.name) is None: + if self._need_interpolation() and self.cache.get(key['name']) is 
None: - if key.name.startswith('satellite'): + if key['name'].startswith('satellite'): zen = self.nc[self.datasets['satellite_zenith_angle']] zattrs = zen.attrs azi = self.nc[self.datasets['satellite_azimuth_angle']] aattrs = azi.attrs - elif key.name.startswith('solar'): + elif key['name'].startswith('solar'): zen = self.nc[self.datasets['solar_zenith_angle']] zattrs = zen.attrs azi = self.nc[self.datasets['solar_azimuth_angle']] aattrs = azi.attrs else: - raise NotImplementedError("Don't know how to read " + key.name) + raise NotImplementedError("Don't know how to read " + key['name']) x, y, z = angle2xyz(azi, zen) @@ -343,24 +343,24 @@ def get_dataset(self, key, info): azi.attrs = aattrs zen.attrs = zattrs - if 'zenith' in key.name: + if 'zenith' in key['name']: values = zen - elif 'azimuth' in key.name: + elif 'azimuth' in key['name']: values = azi else: - raise NotImplementedError("Don't know how to read " + key.name) + raise NotImplementedError("Don't know how to read " + key['name']) - if key.name.startswith('satellite'): + if key['name'].startswith('satellite'): self.cache['satellite_zenith_angle'] = zen self.cache['satellite_azimuth_angle'] = azi - elif key.name.startswith('solar'): + elif key['name'].startswith('solar'): self.cache['solar_zenith_angle'] = zen self.cache['solar_azimuth_angle'] = azi - elif key.name in self.cache: - values = self.cache[key.name] + elif key['name'] in self.cache: + values = self.cache[key['name']] else: - values = self.nc[self.datasets[key.name]] + values = self.nc[self.datasets[key['name']]] values.attrs['platform_name'] = self.platform_name values.attrs['sensor'] = self.sensor @@ -388,26 +388,26 @@ class NCOLCIMeteo(NCOLCILowResData): def get_dataset(self, key, info): """Load a dataset.""" - if key.name not in self.datasets: + if key['name'] not in self.datasets: return self._open_dataset() - logger.debug('Reading %s.', key.name) + logger.debug('Reading %s.', key['name']) - if self._need_interpolation() and self.cache.get(key.name) is None: + if self._need_interpolation() and self.cache.get(key['name']) is None: - data = self.nc[key.name] + data = self.nc[key['name']] values, = self._do_interpolate(data) values.attrs = data.attrs - self.cache[key.name] = values + self.cache[key['name']] = values - elif key.name in self.cache: - values = self.cache[key.name] + elif key['name'] in self.cache: + values = self.cache[key['name']] else: - values = self.nc[key.name] + values = self.nc[key['name']] values.attrs['platform_name'] = self.platform_name values.attrs['sensor'] = self.sensor diff --git a/satpy/readers/omps_edr.py b/satpy/readers/omps_edr.py index 421463ddfc..55b591c0ca 100644 --- a/satpy/readers/omps_edr.py +++ b/satpy/readers/omps_edr.py @@ -67,7 +67,7 @@ def adjust_scaling_factors(self, factors, file_units, output_units): def get_metadata(self, dataset_id, ds_info): """Get the metadata.""" - var_path = ds_info.get('file_key', '{}'.format(dataset_id.name)) + var_path = ds_info.get('file_key', '{}'.format(dataset_id['name'])) info = getattr(self[var_path], 'attrs', {}).copy() info.pop('DIMENSION_LIST', None) info.update(ds_info) @@ -93,12 +93,12 @@ def get_metadata(self, dataset_id, ds_info): }) info.update(dataset_id.to_dict()) if 'standard_name' not in ds_info: - info['standard_name'] = self.get(var_path + '/attr/Title', dataset_id.name) + info['standard_name'] = self.get(var_path + '/attr/Title', dataset_id['name']) return info def get_dataset(self, dataset_id, ds_info): """Get the dataset.""" - var_path = ds_info.get('file_key', 
'{}'.format(dataset_id.name)) + var_path = ds_info.get('file_key', '{}'.format(dataset_id['name'])) metadata = self.get_metadata(dataset_id, ds_info) valid_min, valid_max = self.get(var_path + '/attr/valid_range', self.get(var_path + '/attr/ValidRange', (None, None))) diff --git a/satpy/readers/safe_sar_l2_ocn.py b/satpy/readers/safe_sar_l2_ocn.py index 6b507ad352..292794078b 100644 --- a/satpy/readers/safe_sar_l2_ocn.py +++ b/satpy/readers/safe_sar_l2_ocn.py @@ -15,7 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""SAFE SAR L2 OCN format reader +"""SAFE SAR L2 OCN format reader. The OCN data contains various parameters, but mainly the wind speed and direction calculated from SAR data and input model data from ECMWF @@ -40,6 +40,7 @@ class SAFENC(BaseFileHandler): """Measurement file reader.""" def __init__(self, filename, filename_info, filetype_info): + """Init the file reader.""" super(SAFENC, self).__init__(filename, filename_info, filetype_info) @@ -69,26 +70,26 @@ def __init__(self, filename, filename_info, filetype_info): def get_dataset(self, key, info): """Load a dataset.""" - if key.name in ['owiLat', 'owiLon']: + if key['name'] in ['owiLat', 'owiLon']: if self.lons is None or self.lats is None: self.lons = self.nc['owiLon'] self.lats = self.nc['owiLat'] - if key.name == 'owiLat': + if key['name'] == 'owiLat': res = self.lats else: res = self.lons res.attrs = info else: - res = self.nc[key.name] - if key.name in ['owiHs', 'owiWl', 'owiDirmet']: + res = self.nc[key['name']] + if key['name'] in ['owiHs', 'owiWl', 'owiDirmet']: res = xr.DataArray(res, dims=['y', 'x', 'oswPartitions']) - elif key.name in ['owiNrcs', 'owiNesz', 'owiNrcsNeszCorr']: + elif key['name'] in ['owiNrcs', 'owiNesz', 'owiNrcsNeszCorr']: res = xr.DataArray(res, dims=['y', 'x', 'oswPolarisation']) - elif key.name in ['owiPolarisationName']: + elif key['name'] in ['owiPolarisationName']: res = xr.DataArray(res, dims=['owiPolarisation']) - elif key.name in ['owiCalConstObsi', 'owiCalConstInci']: + elif key['name'] in ['owiCalConstObsi', 'owiCalConstInci']: res = xr.DataArray(res, dims=['owiIncSize']) - elif key.name.startswith('owi'): + elif key['name'].startswith('owi'): res = xr.DataArray(res, dims=['y', 'x']) else: res = xr.DataArray(res, dims=['y', 'x']) diff --git a/satpy/readers/sar_c_safe.py b/satpy/readers/sar_c_safe.py index db69af5716..6ff45675b5 100644 --- a/satpy/readers/sar_c_safe.py +++ b/satpy/readers/sar_c_safe.py @@ -15,8 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""SAFE SAR-C reader -********************* +"""SAFE SAR-C reader. This module implements a reader for Sentinel 1 SAR-C GRD (level1) SAFE format as provided by ESA. The format is comprised of a directory containing multiple @@ -24,15 +23,14 @@ calibration, noise and metadata. References: + - *Level 1 Product Formatting* + https://sentinel.esa.int/web/sentinel/technical-guides/sentinel-1-sar/products-algorithms/level-1-product-formatting - - *Level 1 Product Formatting* - https://sentinel.esa.int/web/sentinel/technical-guides/sentinel-1-sar/products-algorithms/level-1-product-formatting - - - J. Park, A. A. Korosov, M. Babiker, S. Sandven and J. Won, - *"Efficient Thermal Noise Removal for Sentinel-1 TOPSAR Cross-Polarization Channel,"* - in IEEE Transactions on Geoscience and Remote Sensing, vol. 56, no. 3, - pp. 1555-1565, March 2018. - doi: `10.1109/TGRS.2017.2765248 `_ + - J. Park, A. A. 
Korosov, M. Babiker, S. Sandven and J. Won, + *"Efficient Thermal Noise Removal for Sentinel-1 TOPSAR Cross-Polarization Channel,"* + in IEEE Transactions on Geoscience and Remote Sensing, vol. 56, no. 3, + pp. 1555-1565, March 2018. + doi: `10.1109/TGRS.2017.2765248 `_ """ @@ -80,6 +78,7 @@ class SAFEXML(BaseFileHandler): def __init__(self, filename, filename_info, filetype_info, header_file=None): + """Init the xml filehandler.""" super(SAFEXML, self).__init__(filename, filename_info, filetype_info) self._start_time = filename_info['start_time'] @@ -173,7 +172,7 @@ def get_dataset(self, key, info): continue data, low_res_coords = self.read_xml_array(data_items, xml_tag) - if key.name.endswith('squared'): + if key['name'].endswith('squared'): data **= 2 data = self.interpolate_xml_array(data, low_res_coords, data.shape) @@ -194,10 +193,10 @@ def get_noise_correction(self, shape, chunks=None): noise = self.interpolate_xml_array(data, low_res_coords, shape, chunks=chunks) return noise - def get_calibration(self, name, shape, chunks=None): + def get_calibration(self, calibration_name, shape, chunks=None): """Get the calibration array.""" data_items = self.root.findall(".//calibrationVector") - data, low_res_coords = self.read_xml_array(data_items, name) + data, low_res_coords = self.read_xml_array(data_items, calibration_name) return self.interpolate_xml_array(data, low_res_coords, shape, chunks=chunks) def get_calibration_constant(self): @@ -206,10 +205,12 @@ def get_calibration_constant(self): @property def start_time(self): + """Get the start time.""" return self._start_time @property def end_time(self): + """Get the end time.""" return self._end_time @@ -247,6 +248,7 @@ def interpolate_xarray(xpoints, ypoints, values, shape, kind='cubic', def intp(grid_x, grid_y, interpolator): + """Interpolate.""" return interpolator((grid_y, grid_x)) @@ -284,6 +286,7 @@ class SAFEGRD(BaseFileHandler): """ def __init__(self, filename, filename_info, filetype_info, calfh, noisefh): + """Init the grd filehandler.""" super(SAFEGRD, self).__init__(filename, filename_info, filetype_info) @@ -309,26 +312,26 @@ def get_dataset(self, key, info): if self._polarization != key.polarization: return - logger.debug('Reading %s.', key.name) + logger.debug('Reading %s.', key['name']) - if key.name in ['longitude', 'latitude']: + if key['name'] in ['longitude', 'latitude']: logger.debug('Constructing coordinate arrays.') if self.lons is None or self.lats is None: self.lons, self.lats, self.alts = self.get_lonlatalts() - if key.name == 'latitude': + if key['name'] == 'latitude': data = self.lats else: data = self.lons data.attrs.update(info) else: - calibration = key.calibration or 'gamma' - if calibration == 'sigma_nought': - calibration = 'sigmaNought' - elif calibration == 'beta_nought': - calibration = 'betaNought' + calibration_name = key['calibration'].name or 'gamma' + if calibration_name == 'sigma_nought': + calibration_name = 'sigmaNought' + elif calibration_name == 'beta_nought': + calibration_name = 'betaNought' data = self.read_band() # chunks = data.chunks # This seems to be slower for some reason @@ -338,7 +341,7 @@ def get_dataset(self, key, info): logger.debug('Reading calibration data.') - cal = self.calibration.get_calibration(calibration, data.shape, chunks=chunks) + cal = self.calibration.get_calibration(calibration_name, data.shape, chunks=chunks) cal_constant = self.calibration.get_calibration_constant() logger.debug('Calibrating.') @@ -354,7 +357,7 @@ def get_dataset(self, key, info): 
data.attrs.update({'platform_name': self._mission_id}) - data.attrs['units'] = calibration + data.attrs['units'] = '1' return data @@ -481,8 +484,10 @@ def get_gcps(self): @property def start_time(self): + """Get the start time.""" return self._start_time @property def end_time(self): + """Get the end time.""" return self._end_time diff --git a/satpy/readers/scmi.py b/satpy/readers/scmi.py index 19a27e20b1..ef995f25b4 100644 --- a/satpy/readers/scmi.py +++ b/satpy/readers/scmi.py @@ -130,7 +130,7 @@ def get_shape(self, key, info): def get_dataset(self, key, info): """Load a dataset.""" - logger.debug('Reading in get_dataset %s.', key.name) + logger.debug('Reading in get_dataset %s.', key['name']) var_name = info.get('file_key', self.filetype_info.get('file_key')) if var_name: data = self[var_name] @@ -147,7 +147,7 @@ def get_dataset(self, key, info): offset = data.attrs.pop('add_offset', 0) units = data.attrs.get('units', 1) # the '*1' unit is some weird convention added/needed by AWIPS - if units in ['1', '*1'] and key.calibration == 'reflectance': + if units in ['1', '*1'] and key['calibration'] == 'reflectance': data *= 100 factor *= 100 # used for valid_min/max data.attrs['units'] = '%' diff --git a/satpy/readers/seviri_l1b_hrit.py b/satpy/readers/seviri_l1b_hrit.py index 3a3b9f5fbe..6532be3512 100644 --- a/satpy/readers/seviri_l1b_hrit.py +++ b/satpy/readers/seviri_l1b_hrit.py @@ -592,7 +592,6 @@ def _get_area_extent(self, pdict): def get_area_def(self, dsid): """Get the area definition of the band.""" - # Common parameters for both HRV and other channels nlines = int(self.mda['number_of_lines']) loff = np.float32(self.mda['loff']) @@ -615,7 +614,7 @@ def get_area_def(self, dsid): pdict['scandir'] = 'S2N' # Compute area definition for non-HRV channels: - if dsid.name != 'HRV': + if dsid['name'] != 'HRV': pdict['loff'] = loff - nlines aex = self._get_area_extent(pdict) pdict['a_name'] = 'geosmsg' @@ -672,8 +671,8 @@ def get_area_def(self, dsid): def get_dataset(self, key, info): """Get the dataset.""" res = super(HRITMSGFileHandler, self).get_dataset(key, info) - res = self.calibrate(res, key.calibration) - if key.name == 'HRV' and self.fill_hrv: + res = self.calibrate(res, key['calibration']) + if key['name'] == 'HRV' and self.fill_hrv: res = self.pad_hrv_data(res) res.attrs['units'] = info['units'] diff --git a/satpy/readers/seviri_l1b_icare.py b/satpy/readers/seviri_l1b_icare.py index 07345f8fed..748f18e273 100644 --- a/satpy/readers/seviri_l1b_icare.py +++ b/satpy/readers/seviri_l1b_icare.py @@ -77,7 +77,9 @@ class SEVIRI_ICARE(HDF4FileHandler): """SEVIRI L1B handler for HDF4 files.""" + def __init__(self, filename, filename_info, filetype_info): + """Init the file handler.""" super(SEVIRI_ICARE, self).__init__(filename, filename_info, filetype_info) @@ -91,6 +93,7 @@ def __init__(self, filename, filename_info, filetype_info): @property def sensor_name(self): + """Get the sensor name.""" # the sensor and platform names are stored together, eg: MSG1/SEVIRI attr = self['/attr/Sensors'] if isinstance(attr, np.ndarray): @@ -114,6 +117,7 @@ def sensor_name(self): @property def satlon(self): + """Get the satellite longitude.""" attr = self['/attr/Sub_Satellite_Longitude'] if isinstance(attr, np.ndarray): attr = float(attr.astype(str)) @@ -121,6 +125,7 @@ def satlon(self): @property def projlon(self): + """Get the projection longitude.""" attr = self['/attr/Projection_Longitude'] if isinstance(attr, np.ndarray): attr = float(attr.astype(str)) @@ -128,6 +133,7 @@ def projlon(self): 
@property def projection(self): + """Get the projection.""" attr = self['/attr/Geographic_Projection'] if isinstance(attr, np.ndarray): attr = str(attr.astype(str)) @@ -139,6 +145,7 @@ def projection(self): @property def zone(self): + """Get the zone.""" attr = self['/attr/Zone'] if isinstance(attr, np.ndarray): attr = str(attr.astype(str)).lower() @@ -146,6 +153,7 @@ def zone(self): @property def res(self): + """Get the resolution.""" attr = self['/attr/Nadir_Pixel_Size'] if isinstance(attr, np.ndarray): attr = str(attr.astype(str)).lower() @@ -153,6 +161,7 @@ def res(self): @property def end_time(self): + """Get the end time.""" attr = self['/attr/End_Acquisition_Date'] if isinstance(attr, np.ndarray): attr = str(attr.astype(str)) @@ -165,6 +174,7 @@ def end_time(self): @property def start_time(self): + """Get the start time.""" attr = self['/attr/Beginning_Acquisition_Date'] if isinstance(attr, np.ndarray): attr = str(attr.astype(str)) @@ -177,6 +187,7 @@ def start_time(self): @property def alt(self): + """Get the altitude.""" attr = self['/attr/Altitude'] if isinstance(attr, np.ndarray): attr = attr.astype(str) @@ -187,6 +198,7 @@ def alt(self): @property def geoloc(self): + """Get the geolocation.""" attr = self['/attr/Geolocation'] if isinstance(attr, np.ndarray): attr = attr.astype(str) @@ -197,6 +209,7 @@ def geoloc(self): return [cfac, lfac, coff, loff] def get_metadata(self, data, ds_info): + """Get the metadata.""" mda = {} mda.update(data.attrs) mda.update(ds_info) @@ -221,16 +234,17 @@ def get_metadata(self, data, ds_info): return mda def _get_dsname(self, ds_id): - """Returns the correct dataset name based on requested band.""" - if ds_id.name in self.ref_bands: + """Return the correct dataset name based on requested band.""" + if ds_id['name'] in self.ref_bands: ds_get_name = 'Normalized_Radiance' - elif ds_id.name in self.bt_bands: + elif ds_id['name'] in self.bt_bands: ds_get_name = 'Brightness_Temperature' else: - raise NameError("Datset type "+ds_id.name+" is not supported.") + raise NameError("Datset type "+ds_id['name']+" is not supported.") return ds_get_name def get_dataset(self, ds_id, ds_info): + """Get the dataset.""" ds_get_name = self._get_dsname(ds_id) data = self[ds_get_name] data.attrs = self.get_metadata(data, ds_info) @@ -243,11 +257,12 @@ def get_dataset(self, ds_id, ds_info): data.values *= scale_factor data.values += offset # Now we correct range from 0-1 to 0-100 for VIS: - if ds_id.name in self.ref_bands: + if ds_id['name'] in self.ref_bands: data.values *= 100. 
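The ``seviri_l1b_icare`` dataset loading above first unscales the stored values with the file's ``scale_factor``/``offset`` attributes and then converts the normalized 0..1 reflectances of the VIS bands to percent. A small numeric sketch of those two steps (attribute values are illustrative):

.. code-block:: python

    import numpy as np

    counts = np.array([100, 200, 300], dtype=np.float32)
    scale_factor, offset = 0.002, 0.0         # illustrative attribute values

    values = counts * scale_factor + offset   # normalized radiance/reflectance
    values *= 100.                            # percent, as done for the VIS bands
    print(values)                             # [20. 40. 60.]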
return data def get_area_def(self, ds_id): + """Get the area def.""" ds_get_name = self._get_dsname(ds_id) ds_shape = self[ds_get_name + '/shape'] geoloc = self.geoloc @@ -271,7 +286,7 @@ def get_area_def(self, ds_id): # Force scandir to SEVIRI default, not known from file pdict['scandir'] = 'S2N' pdict['a_name'] = 'geosmsg' - if ds_id.name == 'HRV': + if ds_id['name'] == 'HRV': pdict['a_desc'] = 'MSG/SEVIRI HRV channel area' pdict['p_id'] = 'msg_hires' else: diff --git a/satpy/readers/seviri_l1b_native.py b/satpy/readers/seviri_l1b_native.py index fe8b54d7fb..28e2c0227b 100644 --- a/satpy/readers/seviri_l1b_native.py +++ b/satpy/readers/seviri_l1b_native.py @@ -250,7 +250,7 @@ def get_area_def(self, dataset_id): pdict['h'] = self.mda['projection_parameters']['h'] pdict['ssp_lon'] = self.mda['projection_parameters']['ssp_longitude'] - if dataset_id.name == 'HRV': + if dataset_id['name'] == 'HRV': pdict['nlines'] = self.mda['hrv_number_of_lines'] pdict['ncols'] = self.mda['hrv_number_of_columns'] pdict['a_name'] = 'geos_seviri_hrv' @@ -316,7 +316,7 @@ def get_area_extent(self, dataset_id): elif earth_model == 1: ns_offset = -0.5 we_offset = 0.5 - if dataset_id.name == 'HRV': + if dataset_id['name'] == 'HRV': ns_offset = -1.5 we_offset = 1.5 else: @@ -324,7 +324,7 @@ def get_area_extent(self, dataset_id): 'Unrecognised Earth model: {}'.format(earth_model) ) - if dataset_id.name == 'HRV': + if dataset_id['name'] == 'HRV': grid_origin = data15hd['ImageDescription']['ReferenceGridHRV']['GridOrigin'] center_point = (HRV_NUM_COLUMNS / 2) - 2 coeff = 3 @@ -352,7 +352,7 @@ def get_area_extent(self, dataset_id): # The HRV channel in full disk mode comes in two separate areas, and each area has its own area extent stored # in the trailer. # In Rapid Scanning mode, only the "Lower" area (typically over Europe) is acquired and included in the files. 
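In the ``seviri_l1b_native`` area-extent hunk above, the grid is shifted depending on the Earth model: for Earth model 1 the offsets are half a pixel, and 1.5 HRV pixels for the ``HRV`` channel. A condensed sketch of just that branch, assuming the other Earth models leave the grid unshifted (only the ``earth_model == 1`` case is visible in this diff):

.. code-block:: python

    def pixel_offsets(earth_model, channel_name):
        # only the earth_model == 1 branch appears in the hunk above;
        # other models are assumed here to keep zero offsets
        ns_offset = we_offset = 0.0
        if earth_model == 1:
            ns_offset, we_offset = -0.5, 0.5
            if channel_name == 'HRV':
                ns_offset, we_offset = -1.5, 1.5
        return ns_offset, we_offset

    print(pixel_offsets(1, 'HRV'))     # (-1.5, 1.5)
    print(pixel_offsets(1, 'IR_108'))  # (-0.5, 0.5)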
- if (dataset_id.name == 'HRV') and (self.mda['is_full_disk'] or is_rapid_scan): + if (dataset_id['name'] == 'HRV') and (self.mda['is_full_disk'] or is_rapid_scan): # get actual navigation parameters from trailer data data15tr = self.trailer['15TRAILER'] @@ -412,9 +412,9 @@ def get_area_extent(self, dataset_id): def get_dataset(self, dataset_id, dataset_info): """Get the dataset.""" - if dataset_id.name not in self.mda['channel_list']: - raise KeyError('Channel % s not available in the file' % dataset_id.name) - elif dataset_id.name not in ['HRV']: + if dataset_id['name'] not in self.mda['channel_list']: + raise KeyError('Channel % s not available in the file' % dataset_id['name']) + elif dataset_id['name'] not in ['HRV']: shape = (self.mda['number_of_lines'], self.mda['number_of_columns']) # Check if there is only 1 channel in the list as a change @@ -422,7 +422,7 @@ def get_dataset(self, dataset_id, dataset_info): if len(self.mda['channel_list']) == 1: raw = self.dask_array['visir']['line_data'] else: - i = self.mda['channel_list'].index(dataset_id.name) + i = self.mda['channel_list'].index(dataset_id['name']) raw = self.dask_array['visir']['line_data'][:, i, :] data = dec10216(raw.flatten()) @@ -468,8 +468,8 @@ def calibrate(self, data, dataset_id): tic = datetime.now() data15hdr = self.header['15_DATA_HEADER'] - calibration = dataset_id.calibration - channel = dataset_id.name + calibration = dataset_id['calibration'] + channel = dataset_id['name'] # even though all the channels may not be present in the file, # the header does have calibration coefficients for all the channels diff --git a/satpy/readers/seviri_l1b_nc.py b/satpy/readers/seviri_l1b_nc.py index 9c99b3e16a..24ce7b4e7e 100644 --- a/satpy/readers/seviri_l1b_nc.py +++ b/satpy/readers/seviri_l1b_nc.py @@ -34,7 +34,10 @@ class NCSEVIRIFileHandler(BaseFileHandler, SEVIRICalibrationHandler): + """File handler for NC seviri files.""" + def __init__(self, filename, filename_info, filetype_info): + """Init the file handler.""" super(NCSEVIRIFileHandler, self).__init__(filename, filename_info, filetype_info) self.nc = None self.mda = {} @@ -43,13 +46,16 @@ def __init__(self, filename, filename_info, filetype_info): @property def start_time(self): + """Get the start time.""" return self.deltaSt @property def end_time(self): + """Get the end time.""" return self.deltaEnd def _read_file(self): + """Read the file.""" if self.nc is None: self.nc = xr.open_dataset(self.filename, @@ -86,8 +92,8 @@ def _read_file(self): self.south = int(self.nc.attrs['south_most_line']) def get_dataset(self, dataset_id, dataset_info): - - channel = dataset_id.name + """Get the dataset.""" + channel = dataset_id['name'] i = list(CHANNEL_NAMES.values()).index(channel) if (channel == 'HRV'): @@ -115,18 +121,18 @@ def get_dataset(self, dataset_id, dataset_info): # Correct for the scan line order dataset = dataset.sel(y=slice(None, None, -1)) - if dataset_id.calibration == 'counts': + if dataset_id['calibration'] == 'counts': dataset.attrs['_FillValue'] = 0 - if dataset_id.calibration in ['radiance', 'reflectance', 'brightness_temperature']: + if dataset_id['calibration'] in ['radiance', 'reflectance', 'brightness_temperature']: dataset = dataset.where(dataset != 0).astype('float32') dataset = self._convert_to_radiance(dataset, gain, offset) - if dataset_id.calibration == 'reflectance': + if dataset_id['calibration'] == 'reflectance': solar_irradiance = CALIB[int(self.platform_id)][channel]["F"] dataset = self._vis_calibrate(dataset, solar_irradiance) - elif 
dataset_id.calibration == 'brightness_temperature': + elif dataset_id['calibration'] == 'brightness_temperature': dataset = self._ir_calibrate(dataset, channel, cal_type) dataset.attrs.update(self.nc[dataset_info['nc_key']].attrs) @@ -146,14 +152,14 @@ def get_dataset(self, dataset_id, dataset_info): return dataset def get_area_def(self, dataset_id): - + """Get the area def.""" pdict = {} pdict['a'] = self.mda['projection_parameters']['a'] pdict['b'] = self.mda['projection_parameters']['b'] pdict['h'] = self.mda['projection_parameters']['h'] pdict['ssp_lon'] = self.mda['projection_parameters']['ssp_longitude'] - if dataset_id.name == 'HRV': + if dataset_id['name'] == 'HRV': pdict['nlines'] = self.mda['hrv_number_of_lines'] pdict['ncols'] = self.mda['hrv_number_of_columns'] pdict['a_name'] = 'geosmsg_hrv' @@ -171,7 +177,7 @@ def get_area_def(self, dataset_id): return area def get_area_extent(self, dsid): - + """Get the area extent.""" # following calculations assume grid origin is south-east corner # section 7.2.4 of MSG Level 1.5 Image Data Format Description origins = {0: 'NW', 1: 'SW', 2: 'SE', 3: 'NE'} @@ -214,4 +220,6 @@ def get_area_extent(self, dsid): class NCSEVIRIHRVFileHandler(BaseFileHandler, SEVIRICalibrationHandler): + """HRV filehandler.""" + pass diff --git a/satpy/readers/slstr_l1b.py b/satpy/readers/slstr_l1b.py index 7b9749dbbe..0cebb0b0c8 100644 --- a/satpy/readers/slstr_l1b.py +++ b/satpy/readers/slstr_l1b.py @@ -54,13 +54,15 @@ def __init__(self, filename, filename_info, filetype_info): def get_dataset(self, key, info): """Load a dataset.""" - logger.debug('Reading %s.', key.name) - + logger.debug('Reading %s.', key['name']) + file_key = info['file_key'].format(view=key['view'].name[0], + stripe=key['stripe'].name) try: - variable = self.nc[info['file_key']] + variable = self.nc[file_key] except KeyError: return + info = info.copy() info.update(variable.attrs) variable.attrs = info @@ -92,15 +94,16 @@ def __init__(self, filename, filename_info, filetype_info): 'rows': CHUNK_SIZE}) self.nc = self.nc.rename({'columns': 'x', 'rows': 'y'}) self.channel = filename_info['dataset_name'] - self.stripe = self.filename[-5] - self.view = self.filename[-4] + self.stripe = filename_info['stripe'] + views = {'n': 'nadir', 'o': 'oblique'} + self.view = views[filename_info['view']] cal_file = os.path.join(os.path.dirname(self.filename), 'viscal.nc') self.cal = xr.open_dataset(cal_file, decode_cf=True, mask_and_scale=True, chunks={'views': CHUNK_SIZE}) indices_file = os.path.join(os.path.dirname(self.filename), - 'indices_{}{}.nc'.format(self.stripe, self.view)) + 'indices_{}{}.nc'.format(self.stripe, self.view[0])) self.indices = xr.open_dataset(indices_file, decode_cf=True, mask_and_scale=True, @@ -120,23 +123,25 @@ def _cal_rad(rad, didx, solar_flux=None): def get_dataset(self, key, info): """Load a dataset.""" - if self.channel not in key.name: + if (self.channel not in key['name'] or + self.stripe != key['stripe'].name or + self.view != key['view'].name): return - logger.debug('Reading %s.', key.name) - if key.calibration == 'brightness_temperature': - variable = self.nc['{}_BT_{}{}'.format(self.channel, self.stripe, self.view)] + logger.debug('Reading %s.', key['name']) + if key['calibration'] == 'brightness_temperature': + variable = self.nc['{}_BT_{}{}'.format(self.channel, self.stripe, self.view[0])] else: - variable = self.nc['{}_radiance_{}{}'.format(self.channel, self.stripe, self.view)] + variable = self.nc['{}_radiance_{}{}'.format(self.channel, self.stripe, self.view[0])] 
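The SLSTR reflectance branch above (like the OLCI one earlier in this diff) converts radiances to percent reflectance by dividing by the solar irradiance and scaling by pi * 100; for SLSTR the irradiance is looked up per detector via ``da.map_blocks``. The core conversion with made-up numbers, ignoring the sun-earth distance as the TODO notes:

.. code-block:: python

    import numpy as np

    radiance = np.array([50.0, 80.0])   # illustrative radiances
    solar_flux = 1500.0                 # illustrative solar irradiance

    reflectance = radiance / solar_flux * np.pi * 100.
    print(reflectance)                  # roughly [10.47 16.76] percent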
radiances = variable units = variable.attrs['units'] - if key.calibration == 'reflectance': + if key['calibration'] == 'reflectance': # TODO take into account sun-earth distance - solar_flux = self.cal[re.sub('_[^_]*$', '', key.name) + '_solar_irradiances'] - d_index = self.indices['detector_{}{}'.format(self.stripe, self.view)] - idx = 0 if self.view == 'n' else 1 # 0: Nadir view, 1: oblique (check). + solar_flux = self.cal[re.sub('_[^_]*$', '', key['name']) + '_solar_irradiances'] + d_index = self.indices['detector_{}{}'.format(self.stripe, self.view[0])] + idx = 0 if self.view[0] == 'n' else 1 # 0: Nadir view, 1: oblique (check). radiances.data = da.map_blocks( self._cal_rad, radiances.data, d_index.data, solar_flux=solar_flux[:, idx].values) @@ -144,6 +149,7 @@ def get_dataset(self, key, info): radiances *= np.pi * 100 units = '%' + info = info.copy() info.update(radiances.attrs) info.update(key.to_dict()) info.update(dict(units=units, @@ -188,7 +194,7 @@ def __init__(self, filename, filename_info, filetype_info): self._end_time = filename_info['end_time'] cart_file = os.path.join( - os.path.dirname(self.filename), 'cartesian_i{}.nc'.format(self.view)) + os.path.dirname(self.filename), 'cartesian_i{}.nc'.format(self.view[0])) self.cart = xr.open_dataset(cart_file, decode_cf=True, mask_and_scale=True, @@ -204,25 +210,29 @@ def __init__(self, filename, filename_info, filetype_info): def get_dataset(self, key, info): """Load a dataset.""" - if not info['view'].startswith(self.view): + if not info['view'].name.startswith(self.view): return - logger.debug('Reading %s.', key.name) + logger.debug('Reading %s.', key['name']) # Check if file_key is specified in the yaml - file_key = info.get('file_key', key.name) + file_key = info['file_key'].format(view=key['view'].name[0]) variable = self.nc[file_key] l_step = self.nc.attrs.get('al_subsampling_factor', 1) c_step = self.nc.attrs.get('ac_subsampling_factor', 16) + if key.get('resolution', 1000) == 500: + l_step *= 2 + c_step *= 2 + if c_step != 1 or l_step != 1: - logger.debug('Interpolating %s.', key.name) + logger.debug('Interpolating %s.', key['name']) # TODO: do it in cartesian coordinates ! 
pbs at date line and # possible tie_x = self.cartx['x_tx'].data[0, :][::-1] tie_y = self.cartx['y_tx'].data[:, 0] - full_x = self.cart['x_i' + self.view].data - full_y = self.cart['y_i' + self.view].data + full_x = self.cart['x_i' + self.view[0]].data + full_y = self.cart['y_i' + self.view[0]].data variable = variable.fillna(0) @@ -269,18 +279,24 @@ def __init__(self, filename, filename_info, filetype_info): chunks={'columns': CHUNK_SIZE, 'rows': CHUNK_SIZE}) self.nc = self.nc.rename({'columns': 'x', 'rows': 'y'}) - self.stripe = self.filename[-5] - self.view = self.filename[-4] + self.stripe = filename_info['stripe'] + views = {'n': 'nadir', 'o': 'oblique'} + self.view = views[filename_info['view']] # TODO: get metadata from the manifest file (xfdumanifest.xml) self.platform_name = PLATFORM_NAMES[filename_info['mission_id']] self.sensor = 'slstr' def get_dataset(self, key, info): """Load a dataset.""" - logger.debug('Reading %s.', key.name) - - variable = self.nc[key.name] + if (self.stripe != key['stripe'].name or + self.view != key['view'].name): + return + logger.debug('Reading %s.', key['name']) + file_key = info['file_key'].format(view=key['view'].name[0], + stripe=key['stripe'].name) + variable = self.nc[file_key] + info = info.copy() info.update(variable.attrs) info.update(key.to_dict()) info.update(dict(platform_name=self.platform_name, diff --git a/satpy/readers/smos_l2_wind.py b/satpy/readers/smos_l2_wind.py index 5eabc5998b..9e1cc8ea99 100644 --- a/satpy/readers/smos_l2_wind.py +++ b/satpy/readers/smos_l2_wind.py @@ -74,7 +74,6 @@ def get_metadata(self, data, ds_info): def available_datasets(self, configured_datasets=None): """Automatically determine datasets provided by this file.""" - handled_variables = set() # Iterate over dataset contents @@ -102,7 +101,7 @@ def _mask_dataset(self, data): return data def _adjust_lon_coord(self, data): - """Adjust lon coordinate to -180 .. 180 ( not 0 .. 360)""" + """Adjust lon coordinate to -180 .. 180 ( not 0 .. 360).""" data = data.assign_coords(lon=(((data.lon + 180) % 360) - 180)) return data.where(data < 180., data - 360.) 
@@ -127,14 +126,14 @@ def _remove_time_coordinate(self, data): return data def _roll_dataset_lon_coord(self, data): - """Roll dataset along the lon coordinate""" + """Roll dataset along the lon coordinate.""" if 'lon' in data.dims: data = data.roll(lon=720, roll_coords=True) return data def get_dataset(self, ds_id, ds_info): """Get dataset.""" - data = self[ds_id.name] + data = self[ds_id['name']] data.attrs = self.get_metadata(data, ds_info) data = self._remove_time_coordinate(data) data = self._roll_dataset_lon_coord(data) @@ -150,7 +149,7 @@ def get_dataset(self, ds_id, ds_info): return data def _create_area_extent(self, width, height): - """Create area extent""" + """Create area extent.""" # Creating a meshgrid, not needed actually, but makes it easy to find extremes _lon = self._adjust_lon_coord(self['lon']) _lon = self._roll_dataset_lon_coord(_lon) diff --git a/satpy/readers/tropomi_l2.py b/satpy/readers/tropomi_l2.py index e2fab29251..e632c18af3 100644 --- a/satpy/readers/tropomi_l2.py +++ b/satpy/readers/tropomi_l2.py @@ -145,7 +145,6 @@ def available_datasets(self, configured_datasets=None): 'file_key': var_name, 'coordinates': coordinates, 'file_type': self.filetype_info['file_type'], - 'resolution': None, } yield True, new_info @@ -207,8 +206,8 @@ def prepare_geo(self, bounds_data): def get_dataset(self, ds_id, ds_info): """Get dataset.""" - logger.debug("Getting data for: %s", ds_id.name) - file_key = ds_info.get('file_key', ds_id.name) + logger.debug("Getting data for: %s", ds_id['name']) + file_key = ds_info.get('file_key', ds_id['name']) data = self[file_key] data.attrs = self.get_metadata(data, ds_info) fill_value = data.attrs.get('_FillValue', np.float32(np.nan)) @@ -236,6 +235,6 @@ def get_dataset(self, ds_id, ds_info): if coords_exist: data = data.drop_vars(coords_exist) - if ds_id.name in ['assembled_lat_bounds', 'assembled_lon_bounds']: + if ds_id['name'] in ['assembled_lat_bounds', 'assembled_lon_bounds']: data = self.prepare_geo(data) return data diff --git a/satpy/readers/vaisala_gld360.py b/satpy/readers/vaisala_gld360.py index c06816eb0e..9060b60682 100644 --- a/satpy/readers/vaisala_gld360.py +++ b/satpy/readers/vaisala_gld360.py @@ -15,7 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""Vaisala Global Lightning Dataset 360 reader +"""Vaisala Global Lightning Dataset 360 reader. 
Vaisala Global Lightning Dataset GLD360 is data as a service that provides real-time lightning data for accurate and early @@ -43,6 +43,7 @@ class VaisalaGLD360TextFileHandler(BaseFileHandler): """ASCII reader for Vaisala GDL360 data.""" def __init__(self, filename, filename_info, filetype_info): + """Init the file reader.""" super(VaisalaGLD360TextFileHandler, self).__init__(filename, filename_info, filetype_info) names = ['date', 'time', 'latitude', 'longitude', 'power', 'unit'] @@ -56,15 +57,17 @@ def __init__(self, filename, filename_info, filetype_info): @property def start_time(self): + """Get the start time.""" return self.data['datetime'].iloc[0] @property def end_time(self): + """Get the end time.""" return self.data['datetime'].iloc[-1] def get_dataset(self, dataset_id, dataset_info): """Load a dataset.""" - xarr = xr.DataArray(da.from_array(self.data[dataset_id.name], + xarr = xr.DataArray(da.from_array(self.data[dataset_id['name']], chunks=CHUNK_SIZE), dims=["y"]) # Add time, longitude, and latitude as non-dimensional y-coordinates @@ -72,7 +75,7 @@ def get_dataset(self, dataset_id, dataset_info): xarr['longitude'] = ('y', self.data['longitude']) xarr['latitude'] = ('y', self.data['latitude']) - if dataset_id.name == 'power': + if dataset_id['name'] == 'power': # Check that units in the file match the unit specified in the # reader yaml-file if not (self.data.unit == dataset_info['units']).all(): diff --git a/satpy/readers/viirs_compact.py b/satpy/readers/viirs_compact.py index 658a6c6f65..281124c9fd 100644 --- a/satpy/readers/viirs_compact.py +++ b/satpy/readers/viirs_compact.py @@ -151,8 +151,8 @@ def __del__(self): def get_dataset(self, key, info): """Load a dataset.""" - logger.debug('Reading %s.', key.name) - if key.name in chans_dict: + logger.debug('Reading %s.', key['name']) + if key['name'] in chans_dict: m_data = self.read_dataset(key, info) else: m_data = self.read_geo(key, info) @@ -201,16 +201,16 @@ def read_geo(self, key, info): if self.lons is None or self.lats is None: self.lons, self.lats = self.navigate() for pair, fkeys in pairs.items(): - if key.name in pair: + if key['name'] in pair: if (self.cache.get(pair[0]) is None or self.cache.get(pair[1]) is None): angles = self.angles(*fkeys) self.cache[pair[0]], self.cache[pair[1]] = angles - if key.name == pair[0]: - return xr.DataArray(self.cache[pair[0]], name=key.name, + if key['name'] == pair[0]: + return xr.DataArray(self.cache[pair[0]], name=key['name'], attrs=self.mda, dims=('y', 'x')) else: - return xr.DataArray(self.cache[pair[1]], name=key.name, + return xr.DataArray(self.cache[pair[1]], name=key['name'], attrs=self.mda, dims=('y', 'x')) if info.get('standard_name') in ['latitude', 'longitude']: @@ -223,7 +223,7 @@ def read_geo(self, key, info): else: return xr.DataArray(self.lats, attrs=mda, dims=('y', 'x')) - if key.name == 'dnb_moon_illumination_fraction': + if key['name'] == 'dnb_moon_illumination_fraction': mda = self.mda.copy() mda.update(info) return xr.DataArray(da.from_array(self.geostuff["MoonIllumFraction"]), @@ -232,7 +232,7 @@ def read_geo(self, key, info): def read_dataset(self, dataset_key, info): """Read a dataset.""" h5f = self.h5f - channel = chans_dict[dataset_key.name] + channel = chans_dict[dataset_key['name']] chan_dict = dict([(key.split("-")[1], key) for key in h5f["All_Data"].keys() if key.startswith("VIIRS")]) @@ -240,7 +240,7 @@ def read_dataset(self, dataset_key, info): h5rads = h5f["All_Data"][chan_dict[channel]]["Radiance"] chunks = h5rads.chunks or CHUNK_SIZE rads = 
xr.DataArray(da.from_array(h5rads, chunks=chunks), - name=dataset_key.name, + name=dataset_key['name'], dims=['y', 'x']).astype(np.float32) h5attrs = h5rads.attrs scans = h5f["All_Data"]["NumberOfScans"][0] @@ -263,9 +263,9 @@ def read_dataset(self, dataset_key, info): logger.info("Missing attribute for scaling of %s.", channel) pass unit = "W m-2 sr-1 μm-1" - if dataset_key.calibration == 'counts': + if dataset_key['calibration'] == 'counts': raise NotImplementedError("Can't get counts from this data") - if dataset_key.calibration in ['reflectance', 'brightness_temperature']: + if dataset_key['calibration'] in ['reflectance', 'brightness_temperature']: # do calibrate try: # First guess: VIS or NIR data @@ -291,7 +291,7 @@ def read_dataset(self, dataset_key, info): except KeyError: logger.warning("Calibration failed.") - elif dataset_key.calibration != 'radiance': + elif dataset_key['calibration'] != 'radiance': raise ValueError("Calibration parameter should be radiance, " "reflectance or brightness_temperature") rads = rads.clip(min=0) diff --git a/satpy/readers/viirs_edr_active_fires.py b/satpy/readers/viirs_edr_active_fires.py index a762284d70..5ef58d681c 100644 --- a/satpy/readers/viirs_edr_active_fires.py +++ b/satpy/readers/viirs_edr_active_fires.py @@ -56,7 +56,7 @@ def get_dataset(self, dsid, dsinfo): Dask DataArray: Data """ - key = dsinfo.get('file_key', dsid.name).format(variable_prefix=self.prefix) + key = dsinfo.get('file_key', dsid['name']).format(variable_prefix=self.prefix) data = self[key] # rename "phoney dims" data = data.rename(dict(zip(data.dims, ['y', 'x']))) @@ -119,7 +119,7 @@ def __init__(self, filename, filename_info, filetype_info): def get_dataset(self, dsid, dsinfo): """Get requested data as DataArray.""" - ds = self[dsid.name].to_dask_array(lengths=True) + ds = self[dsid['name']].to_dask_array(lengths=True) data = xr.DataArray(ds, dims=("y",), attrs={"platform_name": self.platform_name, "sensor": "VIIRS"}) for key in ('units', 'standard_name', 'flag_meanings', 'flag_values', '_FillValue'): # we only want to add information that isn't present already diff --git a/satpy/readers/viirs_edr_flood.py b/satpy/readers/viirs_edr_flood.py index a39aa3899e..baf655e743 100644 --- a/satpy/readers/viirs_edr_flood.py +++ b/satpy/readers/viirs_edr_flood.py @@ -67,7 +67,7 @@ def get_metadata(self, data, ds_info): def get_dataset(self, ds_id, ds_info): """Get dataset.""" - data = self[ds_id.name] + data = self[ds_id['name']] data.attrs = self.get_metadata(data, ds_info) @@ -84,7 +84,7 @@ def get_dataset(self, ds_id, ds_info): def get_area_def(self, ds_id): """Get area definition.""" - data = self[ds_id.name] + data = self[ds_id['name']] proj_dict = { 'proj': 'latlong', diff --git a/satpy/readers/viirs_l1b.py b/satpy/readers/viirs_l1b.py index 16f3c0e372..37189a1aeb 100644 --- a/satpy/readers/viirs_l1b.py +++ b/satpy/readers/viirs_l1b.py @@ -98,7 +98,7 @@ def adjust_scaling_factors(self, factors, file_units, output_units): def get_shape(self, ds_id, ds_info): """Get shape.""" - var_path = ds_info.get('file_key', 'observation_data/{}'.format(ds_id.name)) + var_path = ds_info.get('file_key', 'observation_data/{}'.format(ds_id['name'])) return self.get(var_path + '/shape', 1) @property @@ -119,7 +119,7 @@ def _get_dataset_file_units(self, dataset_id, ds_info, var_path): if file_units == "none": file_units = "1" - if dataset_id.calibration == 'radiance' and ds_info['units'] == 'W m-2 um-1 sr-1': + if dataset_id.get('calibration') == 'radiance' and ds_info['units'] == 'W m-2 um-1 
sr-1': rad_units_path = var_path + '/attr/radiance_units' if rad_units_path in self: if file_units is None: @@ -134,7 +134,7 @@ def _get_dataset_file_units(self, dataset_id, ds_info, var_path): return file_units def _get_dataset_valid_range(self, dataset_id, ds_info, var_path): - if dataset_id.calibration == 'radiance' and ds_info['units'] == 'W m-2 um-1 sr-1': + if dataset_id.get('calibration') == 'radiance' and ds_info['units'] == 'W m-2 um-1 sr-1': rad_units_path = var_path + '/attr/radiance_units' if rad_units_path in self: # we are getting a reflectance band but we want the radiance values @@ -172,7 +172,7 @@ def _get_dataset_valid_range(self, dataset_id, ds_info, var_path): def get_metadata(self, dataset_id, ds_info): """Get metadata.""" - var_path = ds_info.get('file_key', 'observation_data/{}'.format(dataset_id.name)) + var_path = ds_info.get('file_key', 'observation_data/{}'.format(dataset_id['name'])) shape = self.get_shape(dataset_id, ds_info) file_units = self._get_dataset_file_units(dataset_id, ds_info, var_path) @@ -198,12 +198,11 @@ def get_metadata(self, dataset_id, ds_info): def get_dataset(self, dataset_id, ds_info): """Get dataset.""" - var_path = ds_info.get('file_key', 'observation_data/{}'.format(dataset_id.name)) + var_path = ds_info.get('file_key', 'observation_data/{}'.format(dataset_id['name'])) metadata = self.get_metadata(dataset_id, ds_info) - shape = metadata['shape'] valid_min, valid_max, scale_factor, scale_offset = self._get_dataset_valid_range(dataset_id, ds_info, var_path) - if dataset_id.calibration == 'radiance' and ds_info['units'] == 'W m-2 um-1 sr-1': + if dataset_id.get('calibration') == 'radiance' and ds_info['units'] == 'W m-2 um-1 sr-1': data = self[var_path] elif ds_info.get('units') == '%': data = self[var_path] @@ -217,8 +216,6 @@ def get_dataset(self, dataset_id, ds_info): coords = data.coords data.data = self[lut_var_path].data[index_arr.ravel()].reshape(data.shape) data = data.assign_coords(**coords) - elif shape == 1: - data = self[var_path] else: data = self[var_path] data.attrs.update(metadata) diff --git a/satpy/readers/viirs_sdr.py b/satpy/readers/viirs_sdr.py index a0dd0348b9..e073578fc3 100644 --- a/satpy/readers/viirs_sdr.py +++ b/satpy/readers/viirs_sdr.py @@ -219,15 +219,15 @@ def get_file_units(self, dataset_id, ds_info): # Guess the file units if we need to (normally we would get this from # the file) if file_units is None: - if dataset_id.calibration == 'radiance': - if "dnb" in dataset_id.name.lower(): + if dataset_id.get('calibration') == 'radiance': + if "dnb" in dataset_id['name'].lower(): return 'W m-2 sr-1' else: return 'W cm-2 sr-1' - elif dataset_id.calibration == 'reflectance': + elif dataset_id.get('calibration') == 'reflectance': # CF compliant unit for dimensionless file_units = "1" - elif dataset_id.calibration == 'brightness_temperature': + elif dataset_id.get('calibration') == 'brightness_temperature': file_units = "K" else: LOG.debug("Unknown units for file key '%s'", dataset_id) @@ -274,9 +274,9 @@ def _generate_file_key(self, ds_id, ds_info, factors=False): 'radiance': 'Radiance', 'reflectance': 'Reflectance', 'brightness_temperature': 'BrightnessTemperature', - }.get(ds_id.calibration) + }.get(ds_id.get('calibration')) var_path = var_path.format(calibration=calibration, dataset_group=DATASET_KEYS[ds_info['dataset_group']]) - if ds_id.name in ['dnb_longitude', 'dnb_latitude']: + if ds_id['name'] in ['dnb_longitude', 'dnb_latitude']: if self.use_tc is True: return var_path + '_TC' elif self.use_tc is None and 
var_path + '_TC' in self.file_content: @@ -571,7 +571,7 @@ def _get_file_handlers(self, dsid): if set(fh.datasets) & set(ds_info['dataset_groups'])] if not fhs: LOG.warning("Required file type '%s' not found or loaded for " - "'%s'", ds_info['file_type'], dsid.name) + "'%s'", ds_info['file_type'], dsid['name']) else: if len(set(ds_info['dataset_groups']) & set(['GITCO', 'GIMGO', 'GMTCO', 'GMODO'])) > 1: fhs = self.get_right_geo_fhs(dsid, fhs) diff --git a/satpy/readers/virr_l1b.py b/satpy/readers/virr_l1b.py index 4854f6626f..a572d245b8 100644 --- a/satpy/readers/virr_l1b.py +++ b/satpy/readers/virr_l1b.py @@ -87,7 +87,7 @@ def __init__(self, filename, filename_info, filetype_info): def get_dataset(self, dataset_id, ds_info): """Create DataArray from file content for `dataset_id`.""" - file_key = self.geolocation_prefix + ds_info.get('file_key', dataset_id.name) + file_key = self.geolocation_prefix + ds_info.get('file_key', dataset_id['name']) if self.platform_id == 'FY3B': file_key = file_key.replace('Data/', '') data = self[file_key] diff --git a/satpy/readers/yaml_reader.py b/satpy/readers/yaml_reader.py index f49af26f1a..b5f372f01c 100644 --- a/satpy/readers/yaml_reader.py +++ b/satpy/readers/yaml_reader.py @@ -16,6 +16,7 @@ # You should have received a copy of the GNU General Public License along with # satpy. If not, see . """Base classes and utilities for all readers configured by YAML files.""" + import glob import itertools import logging @@ -39,7 +40,7 @@ from pyresample.boundary import AreaDefBoundary, Boundary from satpy.resample import get_area_def from satpy.config import recursive_dict_update -from satpy.dataset import DATASET_KEYS, DatasetID +from satpy.dataset import DataQuery, get_keys_from_config, default_id_keys_config, default_co_keys_config, DataID from satpy.readers import DatasetDict, get_key from satpy.resample import add_crs_xy_coords from trollsift.parser import globify, parse @@ -98,7 +99,6 @@ def __init__(self, config_files): for config_file in config_files: with open(config_file) as fd: self.config = recursive_dict_update(self.config, yaml.load(fd, Loader=UnsafeLoader)) - self.info = self.config['reader'] self.name = self.info['name'] self.file_patterns = [] @@ -113,6 +113,8 @@ def __init__(self, config_files): if 'sensors' in self.info and not isinstance(self.info['sensors'], (list, tuple)): self.info['sensors'] = [self.info['sensors']] self.datasets = self.config.get('datasets', {}) + self._id_keys = self.info.get('data_identification_keys', default_id_keys_config) + self._co_keys = self.info.get('coord_identification_keys', default_co_keys_config) self.info['filenames'] = [] self.all_ids = {} self.load_ds_ids_from_config() @@ -124,18 +126,18 @@ def sensor_names(self): @property def all_dataset_ids(self): - """Get DatasetIDs of all datasets known to this reader.""" + """Get DataIDs of all datasets known to this reader.""" return self.all_ids.keys() @property def all_dataset_names(self): """Get names of all datasets known to this reader.""" # remove the duplicates from various calibration and resolutions - return set(ds_id.name for ds_id in self.all_dataset_ids) + return set(ds_id['name'] for ds_id in self.all_dataset_ids) @property def available_dataset_ids(self): - """Get DatasetIDs that are loadable by this reader.""" + """Get DataIDs that are loadable by this reader.""" logger.warning( "Available datasets are unknown, returning all datasets...") return self.all_dataset_ids @@ -143,7 +145,7 @@ def available_dataset_ids(self): @property def 
available_dataset_names(self): """Get names of datasets that are loadable by this reader.""" - return (ds_id.name for ds_id in self.available_dataset_ids) + return (ds_id['name'] for ds_id in self.available_dataset_ids) @property @abstractmethod @@ -229,7 +231,7 @@ def select_files_from_pathnames(self, filenames): return selected_filenames def get_dataset_key(self, key, **kwargs): - """Get the fully qualified `DatasetID` matching `key`. + """Get the fully qualified `DataID` matching `key`. See `satpy.readers.get_key` for more information about kwargs. @@ -245,20 +247,19 @@ def load_ds_ids_from_config(self): if 'coordinates' in dataset and \ isinstance(dataset['coordinates'], list): dataset['coordinates'] = tuple(dataset['coordinates']) + id_keys = get_keys_from_config(self._id_keys, dataset) + # Build each permutation/product of the dataset id_kwargs = [] - for key in DATASET_KEYS: - val = dataset.get(key) - if key in ["wavelength", "modifiers"] and isinstance(val, - list): + for key, idval in id_keys.items(): + val = dataset.get(key, idval.get('default') if idval is not None else None) + val_type = None + if idval is not None: + val_type = idval.get('type') + if val_type is not None and issubclass(val_type, tuple): # special case: wavelength can be [min, nominal, max] # but is still considered 1 option - # it also needs to be a tuple so it can be used in - # a dictionary key (DatasetID) - id_kwargs.append((tuple(val),)) - elif key == "modifiers" and val is None: - # empty modifiers means no modifiers applied - id_kwargs.append((tuple(),)) + id_kwargs.append((val, )) elif isinstance(val, (list, tuple, set)): # this key has multiple choices # (ex. 250 meter, 500 meter, 1000 meter resolutions) @@ -270,19 +271,18 @@ def load_ds_ids_from_config(self): # item iterable id_kwargs.append((val,)) for id_params in itertools.product(*id_kwargs): - dsid = DatasetID(*id_params) + dsid = DataID(id_keys, **dict(zip(id_keys, id_params))) ids.append(dsid) # create dataset infos specifically for this permutation ds_info = dataset.copy() - for key in DATASET_KEYS: + for key in dsid.keys(): if isinstance(ds_info.get(key), dict): - ds_info.update(ds_info[key][getattr(dsid, key)]) + ds_info.update(ds_info[key][dsid.get(key)]) # this is important for wavelength which was converted # to a tuple - ds_info[key] = getattr(dsid, key) + ds_info[key] = dsid.get(key) self.all_ids[dsid] = ds_info - return ids @@ -332,7 +332,7 @@ def sensor_names(self): @property def available_dataset_ids(self): - """Get DatasetIDs that are loadable by this reader.""" + """Get DataIDs that are loadable by this reader.""" return self.available_ids.keys() @property @@ -622,7 +622,9 @@ def update_ds_ids_from_file_handlers(self): ds_info['coordinates'] = tuple(ds_info['coordinates']) ds_info.setdefault('modifiers', tuple()) # default to no mods - ds_id = DatasetID.from_dict(ds_info) + + # Create DataID for this dataset + ds_id = DataID(self._id_keys, **ds_info) # all datasets new_ids[ds_id] = ds_info # available datasets @@ -696,11 +698,13 @@ def _get_coordinates_for_dataset_key(self, dsid): for cinfo in ds_info.get('coordinates', []): if not isinstance(cinfo, dict): cinfo = {'name': cinfo} - - cinfo['resolution'] = ds_info['resolution'] - if 'polarization' in ds_info: - cinfo['polarization'] = ds_info['polarization'] - cid = DatasetID(**cinfo) + for key in self._co_keys: + if key == 'name': + continue + if key in ds_info: + if ds_info[key] is not None: + cinfo[key] = ds_info[key] + cid = DataQuery.from_dict(cinfo) 
cids.append(self.get_dataset_key(cid)) return cids @@ -720,7 +724,7 @@ def _get_file_handlers(self, dsid): filetype = self._preferred_filetype(ds_info['file_type']) if filetype is None: logger.warning("Required file type '%s' not found or loaded for " - "'%s'", ds_info['file_type'], dsid.name) + "'%s'", ds_info['file_type'], dsid['name']) else: return self.file_handlers[filetype] @@ -813,23 +817,23 @@ def _load_ancillary_variables(self, datasets, **kwargs): for dataset in datasets.values(): new_vars = [] for av_id in dataset.attrs.get('ancillary_variables', []): - if isinstance(av_id, DatasetID): + if isinstance(av_id, DataID): new_vars.append(datasets[av_id]) else: new_vars.append(av_id) dataset.attrs['ancillary_variables'] = new_vars def get_dataset_key(self, key, available_only=False, **kwargs): - """Get the fully qualified `DatasetID` matching `key`. + """Get the fully qualified `DataID` matching `key`. - This will first search through available DatasetIDs, datasets that + This will first search through available DataIDs, datasets that should be possible to load, and fallback to "known" datasets, those that are configured but aren't loadable from the provided files. Providing ``available_only=True`` will stop this fallback behavior and raise a ``KeyError`` exception if no available dataset is found. Args: - key (str, float, DatasetID): Key to search for in this reader. + key (str, float, DataID, DataQuery): Key to search for in this reader. available_only (bool): Search only loadable datasets for the provided key. Loadable datasets are always searched first, but if ``available_only=False`` (default) then all known @@ -838,7 +842,7 @@ def get_dataset_key(self, key, available_only=False, **kwargs): kwargs. Returns: - Best matching DatasetID to the provided ``key``. + Best matching DataID to the provided ``key``. Raises: KeyError: if no key match is found. 
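A recurring pattern in the reader hunks above and below is the switch from attribute access on dataset identifiers (key.name, dataset_id.calibration) to mapping-style access (key['name'], dataset_id.get('calibration')), with .get() used wherever a key such as calibration may be absent, and with ds_info.get('file_key', ...) falling back to the identifier's name. The sketch below is a minimal, hypothetical illustration of that access pattern only; it is not code from this changeset, and a plain dict stands in for the real identifier object.

    # Hypothetical sketch (not part of this changeset) of the mapping-style
    # identifier access used throughout the converted readers: required keys
    # use item access, optional keys use .get(), which returns None instead
    # of raising when a key is missing.
    def pick_file_key(dataset_id, ds_info):
        """Return a file variable name for a dataset identifier."""
        name = dataset_id['name']                    # required key: item access
        calibration = dataset_id.get('calibration')  # optional key: .get()
        file_key = ds_info.get('file_key', name)     # fall back to the name
        if calibration is not None:
            file_key = '{}_{}'.format(file_key, calibration)
        return file_key

    # A plain dict stands in for a DataID/DataQuery here:
    # pick_file_key({'name': 'C05', 'calibration': 'radiance'}, {})
    # -> 'C05_radiance'
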
diff --git a/satpy/resample.py b/satpy/resample.py index 1f7be37e84..8ee33ff31b 100644 --- a/satpy/resample.py +++ b/satpy/resample.py @@ -907,7 +907,7 @@ def _move_existing_caches(cache_dir, filename): import shutil old_cache_dir = os.path.join(cache_dir, 'moved_by_satpy') try: - os.mkdir(old_cache_dir) + os.makedirs(old_cache_dir) except FileExistsError: pass try: @@ -1232,6 +1232,7 @@ def compute(self, data, fill_value=np.nan, categories=None, **kwargs): } +# deepcode ignore PythonSameEvalBinaryExpressiontrue: PRBaseResampler is None only on import errors if PRBaseResampler is None: PRBaseResampler = BaseResampler diff --git a/satpy/scene.py b/satpy/scene.py index bc961a0146..819237cfcb 100644 --- a/satpy/scene.py +++ b/satpy/scene.py @@ -22,7 +22,7 @@ from satpy.composites import CompositorLoader, IncompatibleAreas from satpy.config import get_environ_config_dir -from satpy.dataset import (DatasetID, MetadataObject, dataset_walker, +from satpy.dataset import (DataQuery, DataID, MetadataObject, dataset_walker, replace_anc, combine_metadata) from satpy.node import DependencyTree from satpy.readers import DatasetDict, load_readers @@ -158,7 +158,7 @@ def __init__(self, filenames=None, reader=None, filter_parameters=None, reader_k self.resamplers = {} def _ipython_key_completions_(self): - return [x.name for x in self.datasets.keys()] + return [x['name'] for x in self.datasets.keys()] def _compute_metadata_from_readers(self): """Determine pieces of metadata from the readers loaded.""" @@ -207,7 +207,7 @@ def end_time(self): @property def missing_datasets(self): - """Set of DatasetIDs that have not been successfully loaded.""" + """Set of DataIDs that have not been successfully loaded.""" return set(self.wishlist) - set(self.datasets.keys()) def _compare_areas(self, datasets=None, compare_func=max): @@ -284,7 +284,7 @@ def min_area(self, datasets=None): return self._compare_areas(datasets=datasets, compare_func=min) def available_dataset_ids(self, reader_name=None, composites=False): - """Get DatasetIDs of loadable datasets. + """Get DataIDs of loadable datasets. This can be for all readers loaded by this Scene or just for ``reader_name`` if specified. @@ -315,7 +315,7 @@ def available_dataset_ids(self, reader_name=None, composites=False): def available_dataset_names(self, reader_name=None, composites=False): """Get the list of the names of the available datasets.""" - return sorted(set(x.name for x in self.available_dataset_ids( + return sorted(set(x['name'] for x in self.available_dataset_ids( reader_name=reader_name, composites=composites))) def all_dataset_ids(self, reader_name=None, composites=False): @@ -349,7 +349,7 @@ def all_dataset_names(self, reader_name=None, composites=False): product for a particular reader. 
""" - return sorted(set(x.name for x in self.all_dataset_ids( + return sorted(set(x['name'] for x in self.all_dataset_ids( reader_name=reader_name, composites=composites))) def _check_known_composites(self, available_only=False): @@ -362,14 +362,14 @@ def _check_known_composites(self, available_only=False): dep_tree = DependencyTree(self.readers, sensor_comps, mods, available_only=True) # ignore inline compositor dependencies starting with '_' comps = (comp for comp_dict in sensor_comps.values() - for comp in comp_dict.keys() if not comp.name.startswith('_')) + for comp in comp_dict.keys() if not comp['name'].startswith('_')) # make sure that these composites are even create-able by these readers all_comps = set(comps) - # find_dependencies will update the all_comps set with DatasetIDs + # find_dependencies will update the all_comps set with DataIDs dep_tree.find_dependencies(all_comps) available_comps = set(x.name for x in dep_tree.trunk()) # get rid of modified composites that are in the trunk - return sorted(available_comps & set(all_comps)) + return sorted(available_comps & all_comps) def available_composite_ids(self): """Get names of composites that can be generated from the available datasets.""" @@ -377,7 +377,7 @@ def available_composite_ids(self): def available_composite_names(self): """All configured composites known to this Scene.""" - return sorted(set(x.name for x in self.available_composite_ids())) + return sorted(set(x['name'] for x in self.available_composite_ids())) def all_composite_ids(self): """Get all IDs for configured composites.""" @@ -385,7 +385,7 @@ def all_composite_ids(self): def all_composite_names(self): """Get all names for all configured composites.""" - return sorted(set(x.name for x in self.all_composite_ids())) + return sorted(set(x['name'] for x in self.all_composite_ids())) def all_modifier_names(self): """Get names of configured modifier objects.""" @@ -409,13 +409,13 @@ def iter_by_area(self): datasets_by_area = {} for ds in self: a = ds.attrs.get('area') - datasets_by_area.setdefault(a, []).append( - DatasetID.from_dict(ds.attrs)) + dsid = DataID.from_dataarray(ds) + datasets_by_area.setdefault(a, []).append(dsid) return datasets_by_area.items() def keys(self, **kwargs): - """Get DatasetID keys for the underlying data container.""" + """Get DataID keys for the underlying data container.""" return self.datasets.keys(**kwargs) def values(self): @@ -426,7 +426,7 @@ def copy(self, datasets=None): """Create a copy of the Scene including dependency information. Args: - datasets (list, tuple): `DatasetID` objects for the datasets + datasets (list, tuple): `DataID` objects for the datasets to include in the new Scene object. 
""" @@ -442,8 +442,7 @@ def copy(self, datasets=None): if not datasets: new_scn.wishlist = self.wishlist.copy() else: - new_scn.wishlist = set([DatasetID.from_dict(ds.attrs) - for ds in new_scn]) + new_scn.wishlist = set(ds_id for ds_id in new_scn.keys()) return new_scn @property @@ -471,7 +470,7 @@ def _slice_area_from_bbox(self, src_area, dst_area, ll_bbox=None, crs = src_area.crs if hasattr(src_area, 'crs') else src_area.proj_dict dst_area = AreaDefinition( 'crop_area', 'crop_area', 'crop_xy', - crs, src_area.x_size, src_area.y_size, + crs, src_area.width, src_area.height, xy_bbox) x_slice, y_slice = src_area.get_area_slices(dst_area) return src_area[y_slice, x_slice], y_slice, x_slice @@ -481,11 +480,12 @@ def _slice_datasets(self, dataset_ids, slice_key, new_area, area_only=True): new_datasets = {} datasets = (self[ds_id] for ds_id in dataset_ids) for ds, parent_ds in dataset_walker(datasets): - ds_id = DatasetID.from_dict(ds.attrs) + ds_id = DataID.from_dataarray(ds) # handle ancillary variables pres = None if parent_ds is not None: - pres = new_datasets[DatasetID.from_dict(parent_ds.attrs)] + parent_dsid = DataID.from_dataarray(parent_ds) + pres = new_datasets[parent_dsid] if ds_id in new_datasets: replace_anc(ds, pres) continue @@ -548,7 +548,7 @@ def crop(self, area=None, ll_bbox=None, xy_bbox=None, dataset_ids=None): longitude and Y is latitude. xy_bbox (tuple, list): Same as `ll_bbox` but elements are in projection units. - dataset_ids (iterable): DatasetIDs to include in the returned + dataset_ids (iterable): DataIDs to include in the returned `Scene`. Defaults to all datasets. This method will attempt to intelligently slice the data to preserve @@ -623,7 +623,7 @@ def aggregate(self, dataset_ids=None, boundary='exact', side='left', func='mean' """Create an aggregated version of the Scene. Args: - dataset_ids (iterable): DatasetIDs to include in the returned + dataset_ids (iterable): DataIDs to include in the returned `Scene`. Defaults to all datasets. func (string): Function to apply on each aggregation window. One of 'mean', 'sum', 'min', 'max', 'median', 'argmin', @@ -674,7 +674,7 @@ def get(self, key, default=None): def __getitem__(self, key): """Get a dataset or create a new 'slice' of the Scene.""" - if isinstance(key, tuple) and not isinstance(key, DatasetID): + if isinstance(key, tuple): return self.slice(key) return self.datasets[key] @@ -727,7 +727,7 @@ def _get_prereq_datasets(self, comp_id, prereq_nodes, keepables, skip=False): """Get a composite's prerequisites, generating them if needed. Args: - comp_id (DatasetID): DatasetID for the composite whose + comp_id (DataID): DataID for the composite whose prerequisites are being collected. prereq_nodes (sequence of Nodes): Prerequisites to collect keepables (set): `set` to update if any prerequisites can't @@ -836,11 +836,10 @@ def _generate_composite(self, comp_node, keepables): composite = compositor(prereq_datasets, optional_datasets=optional_datasets, **self.attrs) - - cid = DatasetID.from_dict(composite.attrs) - + cid = DataID.new_id_from_dataarray(composite) self.datasets[cid] = composite - # update the node with the computed DatasetID + + # update the node with the computed DataID if comp_node.name in self.wishlist: self.wishlist.remove(comp_node.name) self.wishlist.add(cid) @@ -903,7 +902,7 @@ def unload(self, keepables=None): generate composites that have yet to be generated. 
Args: - keepables (iterable): DatasetIDs to keep whether they are needed + keepables (iterable): DataIDs to keep whether they are needed or not. """ @@ -914,28 +913,28 @@ def unload(self, keepables=None): LOG.debug("Unloading dataset: %r", ds_id) del self.datasets[ds_id] - def load(self, wishlist, calibration=None, resolution=None, - polarization=None, level=None, generate=True, unload=True, + def load(self, wishlist, calibration='*', resolution='*', + polarization='*', level='*', generate=True, unload=True, **kwargs): """Read and generate requested datasets. - When the `wishlist` contains `DatasetID` objects they can either be - fully-specified `DatasetID` objects with every parameter specified + When the `wishlist` contains `DataQuery` objects they can either be + fully-specified `DataQuery` objects with every parameter specified or they can not provide certain parameters and the "best" parameter will be chosen. For example, if a dataset is available in multiple - resolutions and no resolution is specified in the wishlist's DatasetID + resolutions and no resolution is specified in the wishlist's DataQuery then the highest (smallest number) resolution will be chosen. Loaded `DataArray` objects are created and stored in the Scene object. Args: - wishlist (iterable): List of names (str), wavelengths (float), or - DatasetID objects of the requested datasets - to load. See `available_dataset_ids()` for - what datasets are available. + wishlist (iterable): List of names (str), wavelengths (float), + DataQuery objects or DataID of the requested + datasets to load. See `available_dataset_ids()` + for what datasets are available. calibration (list, str): Calibration levels to limit available datasets. This is a shortcut to - having to list each DatasetID in + having to list each DataQuery/DataID in `wishlist`. resolution (list | float): Resolution to limit available datasets. 
This is a shortcut similar to @@ -958,13 +957,13 @@ def load(self, wishlist, calibration=None, resolution=None, if isinstance(wishlist, str): raise TypeError("'load' expects a list of datasets, got a string.") dataset_keys = set(wishlist) - needed_datasets = (self.wishlist | dataset_keys) - \ - set(self.datasets.keys()) + needed_datasets = (self.wishlist | dataset_keys) - set(self.datasets.keys()) + query = DataQuery(calibration=calibration, + polarization=polarization, + resolution=resolution, + level=level) unknown = self.dep_tree.find_dependencies(needed_datasets, - calibration=calibration, - polarization=polarization, - resolution=resolution, - level=level) + query) self.wishlist |= needed_datasets if unknown: unknown_str = ", ".join(map(str, unknown)) @@ -991,8 +990,8 @@ def _slice_data(self, source_area, slices, dataset): """Slice the data to reduce it.""" slice_x, slice_y = slices dataset = dataset.isel(x=slice_x, y=slice_y) - assert ('x', source_area.x_size) in dataset.sizes.items() - assert ('y', source_area.y_size) in dataset.sizes.items() + assert ('x', source_area.width) in dataset.sizes.items() + assert ('y', source_area.height) in dataset.sizes.items() dataset.attrs['area'] = source_area return dataset @@ -1019,10 +1018,10 @@ def _resampled_scene(self, new_scn, destination_area, reduce_data=True, resamplers = {} reductions = {} for dataset, parent_dataset in dataset_walker(datasets): - ds_id = DatasetID.from_dict(dataset.attrs) + ds_id = DataID.from_dataarray(dataset) pres = None if parent_dataset is not None: - pres = new_datasets[DatasetID.from_dict(parent_dataset.attrs)] + pres = new_datasets[DataID.from_dataarray(parent_dataset)] if ds_id in new_datasets: replace_anc(new_datasets[ds_id], pres) if ds_id in new_scn.datasets: @@ -1083,7 +1082,7 @@ def resample(self, destination=None, datasets=None, generate=True, resample to. If not specified then the area returned by `Scene.max_area()` will be used. datasets (list): Limit datasets to resample to these specified - `DatasetID` objects . By default all currently loaded + data arrays. By default all currently loaded datasets are resampled. generate (bool): Generate any requested composites that could not be previously due to incompatible areas (default: True). @@ -1142,9 +1141,9 @@ def show(self, dataset_id, overlay=None): Show dataset on screen as an image, possibly with an overlay. Args: - dataset_id (DatasetID or str): - Either a DatasetID or a string representing a DatasetID, that - has been previously loaded using Scene.load. + dataset_id (DataID, DataQuery or str): + Either a DataID, a DataQuery or a string, that refers to a data + array that has been previously loaded using Scene.load. overlay (dict, optional): Add an overlay before showing the image. The keys/values for this dictionary are as the arguments for @@ -1204,7 +1203,7 @@ def to_geoviews(self, gvtype=None, datasets=None, kdims=None, vdims=None, dynami else: gvds = gv.Dataset(ds) - if "latitude" in ds.coords.keys(): + if "latitude" in ds.coords: gview = gvds.to(gv.QuadMesh, kdims=["longitude", "latitude"], vdims=vdims, dynamic=dynamic) else: gview = gvds.to(gvtype, kdims=["x", "y"], vdims=vdims, dynamic=dynamic) @@ -1257,8 +1256,8 @@ def save_dataset(self, dataset_id, filename=None, writer=None, """Save the ``dataset_id`` to file using ``writer``. Args: - dataset_id (str or Number or DatasetID): Identifier for the - dataset to save to disk. + dataset_id (str or Number or DataID or DataQuery): Identifier for + the dataset to save to disk. 
filename (str): Optionally specify the filename to save this dataset to. It may include string formatting patterns that will be filled in by dataset diff --git a/satpy/tests/compositor_tests/test_ahi.py b/satpy/tests/compositor_tests/test_ahi.py index 7a4ae0aaf6..f406add169 100644 --- a/satpy/tests/compositor_tests/test_ahi.py +++ b/satpy/tests/compositor_tests/test_ahi.py @@ -21,7 +21,6 @@ class TestAHIComposites(unittest.TestCase): - """Test AHI-specific composites.""" def test_load_composite_yaml(self): diff --git a/satpy/tests/compositor_tests/test_viirs.py b/satpy/tests/compositor_tests/test_viirs.py index 69aa045061..4c68ab51af 100644 --- a/satpy/tests/compositor_tests/test_viirs.py +++ b/satpy/tests/compositor_tests/test_viirs.py @@ -252,12 +252,12 @@ def test_reflectance_corrector_abi(self): import dask.array as da import numpy as np from satpy.composites.viirs import ReflectanceCorrector - from satpy import DatasetID + from satpy.tests.utils import make_dsq ref_cor = ReflectanceCorrector(dem_filename='_fake.hdf', optional_prerequisites=[ - DatasetID(name='satellite_azimuth_angle'), - DatasetID(name='satellite_zenith_angle'), - DatasetID(name='solar_azimuth_angle'), - DatasetID(name='solar_zenith_angle')], name='C01', prerequisites=[], + make_dsq(name='satellite_azimuth_angle'), + make_dsq(name='satellite_zenith_angle'), + make_dsq(name='solar_azimuth_angle'), + make_dsq(name='solar_zenith_angle')], name='C01', prerequisites=[], wavelength=(0.45, 0.47, 0.49), resolution=1000, calibration='reflectance', modifiers=('sunz_corrected', 'rayleigh_corrected_crefl',), sensor='abi') @@ -269,13 +269,12 @@ def test_reflectance_corrector_abi(self): self.assertEqual(ref_cor.attrs['sensor'], 'abi') self.assertEqual(ref_cor.attrs['prerequisites'], []) self.assertEqual(ref_cor.attrs['optional_prerequisites'], [ - DatasetID(name='satellite_azimuth_angle'), - DatasetID(name='satellite_zenith_angle'), - DatasetID(name='solar_azimuth_angle'), - DatasetID(name='solar_zenith_angle')]) + make_dsq(name='satellite_azimuth_angle'), + make_dsq(name='satellite_zenith_angle'), + make_dsq(name='solar_azimuth_angle'), + make_dsq(name='solar_zenith_angle')]) area, dnb = self.data_area_ref_corrector() - print(dnb.compute()) c01 = xr.DataArray(dnb, dims=('y', 'x'), attrs={'satellite_longitude': -89.5, 'satellite_latitude': 0.0, @@ -327,12 +326,12 @@ def test_reflectance_corrector_viirs(self): import numpy as np import datetime from satpy.composites.viirs import ReflectanceCorrector - from satpy import DatasetID + from satpy.tests.utils import make_dsq ref_cor = ReflectanceCorrector(dem_filename='_fake.hdf', optional_prerequisites=[ - DatasetID(name='satellite_azimuth_angle'), - DatasetID(name='satellite_zenith_angle'), - DatasetID(name='solar_azimuth_angle'), - DatasetID(name='solar_zenith_angle')], + make_dsq(name='satellite_azimuth_angle'), + make_dsq(name='satellite_zenith_angle'), + make_dsq(name='solar_azimuth_angle'), + make_dsq(name='solar_zenith_angle')], name='I01', prerequisites=[], wavelength=(0.6, 0.64, 0.68), resolution=371, calibration='reflectance', modifiers=('sunz_corrected_iband', 'rayleigh_corrected_crefl_iband'), @@ -346,10 +345,10 @@ def test_reflectance_corrector_viirs(self): self.assertEqual(ref_cor.attrs['sensor'], 'viirs') self.assertEqual(ref_cor.attrs['prerequisites'], []) self.assertEqual(ref_cor.attrs['optional_prerequisites'], [ - DatasetID(name='satellite_azimuth_angle'), - DatasetID(name='satellite_zenith_angle'), - DatasetID(name='solar_azimuth_angle'), - 
DatasetID(name='solar_zenith_angle')]) + make_dsq(name='satellite_azimuth_angle'), + make_dsq(name='satellite_zenith_angle'), + make_dsq(name='solar_azimuth_angle'), + make_dsq(name='solar_zenith_angle')]) area, dnb = self.data_area_ref_corrector() @@ -405,11 +404,11 @@ def test_reflectance_corrector_modis(self): import numpy as np import datetime from satpy.composites.viirs import ReflectanceCorrector - from satpy import DatasetID - sataa_did = DatasetID(name='satellite_azimuth_angle') - satza_did = DatasetID(name='satellite_zenith_angle') - solaa_did = DatasetID(name='solar_azimuth_angle') - solza_did = DatasetID(name='solar_zenith_angle') + from satpy.tests.utils import make_dsq + sataa_did = make_dsq(name='satellite_azimuth_angle') + satza_did = make_dsq(name='satellite_zenith_angle') + solaa_did = make_dsq(name='solar_azimuth_angle') + solza_did = make_dsq(name='solar_zenith_angle') ref_cor = ReflectanceCorrector( dem_filename='_fake.hdf', optional_prerequisites=[sataa_did, satza_did, solaa_did, solza_did], name='1', prerequisites=[], wavelength=(0.62, 0.645, 0.67), resolution=250, calibration='reflectance', @@ -422,10 +421,10 @@ def test_reflectance_corrector_modis(self): self.assertEqual(ref_cor.attrs['sensor'], 'modis') self.assertEqual(ref_cor.attrs['prerequisites'], []) self.assertEqual(ref_cor.attrs['optional_prerequisites'], [ - DatasetID(name='satellite_azimuth_angle'), - DatasetID(name='satellite_zenith_angle'), - DatasetID(name='solar_azimuth_angle'), - DatasetID(name='solar_zenith_angle')]) + make_dsq(name='satellite_azimuth_angle'), + make_dsq(name='satellite_zenith_angle'), + make_dsq(name='solar_azimuth_angle'), + make_dsq(name='solar_zenith_angle')]) area, dnb = self.data_area_ref_corrector() diff --git a/satpy/tests/features/steps/steps-load.py b/satpy/tests/features/steps/steps-load.py index c966b065f7..f8de2b28fa 100644 --- a/satpy/tests/features/steps/steps-load.py +++ b/satpy/tests/features/steps/steps-load.py @@ -15,8 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . 
-""" -""" +"""Behaviour steps for loading.""" import os from behave import use_step_matcher, given, when, then @@ -27,6 +26,7 @@ @given(u'data is available') def step_impl_data_available(context): + """Make data available.""" if not os.path.exists('/tmp/SVM02_npp_d20150311_t1122204_e1123446_b17451_c20150311113206961730_cspp_dev.h5'): response = urlopen('https://zenodo.org/record/16355/files/' 'SVM02_npp_d20150311_t1122204_e1123446_b17451_c20150311113206961730_cspp_dev.h5') @@ -43,6 +43,7 @@ def step_impl_data_available(context): @when(u'user loads the data without providing a config file') def step_impl_user_loads_no_config(context): + """Load the data without a config.""" from satpy import Scene, find_files_and_readers from datetime import datetime os.chdir("/tmp/") @@ -56,21 +57,24 @@ def step_impl_user_loads_no_config(context): @then(u'the data is available in a scene object') def step_impl_data_available_in_scene(context): + """Check that the data is available in the scene.""" assert (context.scene["M02"] is not None) try: context.scene["M01"] is None - assert False + raise AssertionError() except KeyError: - assert True + pass @when(u'some items are not available') def step_impl_items_not_available(context): + """Load some data.""" context.scene.load(["M01"]) @when(u'user wants to know what data is available') def step_impl_user_checks_availability(context): + """Check availability.""" from satpy import Scene, find_files_and_readers from datetime import datetime os.chdir("/tmp/") @@ -83,6 +87,7 @@ def step_impl_user_checks_availability(context): @then(u'available datasets are returned') def step_impl_available_datasets_are_returned(context): + """Check that datasets are returned.""" assert (len(context.available_dataset_ids) >= 5) @@ -91,14 +96,15 @@ def step_impl_datasets_with_same_name(context): """Datasets with the same name but different other ID parameters.""" from satpy import Scene from xarray import DataArray - from satpy.dataset import DatasetID + from satpy.tests.utils import make_dataid scn = Scene() - scn[DatasetID('ds1', calibration='radiance')] = DataArray([[1, 2], [3, 4]]) - scn[DatasetID('ds1', resolution=500, calibration='reflectance')] = DataArray([[5, 6], [7, 8]]) - scn[DatasetID('ds1', resolution=250, calibration='reflectance')] = DataArray([[5, 6], [7, 8]]) - scn[DatasetID('ds1', resolution=1000, calibration='reflectance')] = DataArray([[5, 6], [7, 8]]) - scn[DatasetID('ds1', resolution=500, calibration='radiance', modifiers=('mod1',))] = DataArray([[5, 6], [7, 8]]) - ds_id = DatasetID('ds1', resolution=1000, calibration='radiance', modifiers=('mod1', 'mod2')) + scn[make_dataid(name='ds1', calibration='radiance')] = DataArray([[1, 2], [3, 4]]) + scn[make_dataid(name='ds1', resolution=500, calibration='reflectance')] = DataArray([[5, 6], [7, 8]]) + scn[make_dataid(name='ds1', resolution=250, calibration='reflectance')] = DataArray([[5, 6], [7, 8]]) + scn[make_dataid(name='ds1', resolution=1000, calibration='reflectance')] = DataArray([[5, 6], [7, 8]]) + scn[make_dataid(name='ds1', resolution=500, calibration='radiance', modifiers=('mod1',))] = \ + DataArray([[5, 6], [7, 8]]) + ds_id = make_dataid(name='ds1', resolution=1000, calibration='radiance', modifiers=('mod1', 'mod2')) scn[ds_id] = DataArray([[5, 6], [7, 8]]) context.scene = scn @@ -111,5 +117,5 @@ def step_impl_dataset_retrieved_by_name(context): @then("the least modified version of the dataset is returned") def step_impl_least_modified_dataset_returned(context): - """The dataset should be one of the least 
modified datasets.""" + """Check that the dataset should be one of the least modified datasets.""" assert(len(context.returned_dataset.attrs['modifiers']) == 0) diff --git a/satpy/tests/reader_tests/test_aapp_l1b.py b/satpy/tests/reader_tests/test_aapp_l1b.py index 793f11489b..f38169a6ad 100644 --- a/satpy/tests/reader_tests/test_aapp_l1b.py +++ b/satpy/tests/reader_tests/test_aapp_l1b.py @@ -22,7 +22,7 @@ from satpy.readers.aapp_l1b import _HEADERTYPE, _SCANTYPE, AVHRRAAPPL1BFile import tempfile import datetime -from satpy import DatasetID +from satpy.tests.utils import make_dataid class TestAAPPL1B(unittest.TestCase): @@ -93,7 +93,7 @@ def test_read(self): mins = [] maxs = [] for name in ['1', '2', '3a']: - key = DatasetID(name=name, calibration='reflectance') + key = make_dataid(name=name, calibration='reflectance') res = fh.get_dataset(key, info) assert(res.min() == 0) assert(res.max() >= 100) @@ -103,7 +103,7 @@ def test_read(self): assert(np.all(np.isnan(res[:2, :]))) for name in ['3b', '4', '5']: - key = DatasetID(name=name, calibration='reflectance') + key = make_dataid(name=name, calibration='reflectance') res = fh.get_dataset(key, info) mins.append(res.min().values) maxs.append(res.max().values) @@ -123,7 +123,7 @@ def test_angles(self): fh = AVHRRAAPPL1BFile(tmpfile, self.filename_info, self.filetype_info) info = {} - key = DatasetID(name='solar_zenith_angle') + key = make_dataid(name='solar_zenith_angle') res = fh.get_dataset(key, info) assert(np.all(res == 0)) @@ -136,9 +136,9 @@ def test_navigation(self): fh = AVHRRAAPPL1BFile(tmpfile, self.filename_info, self.filetype_info) info = {} - key = DatasetID(name='longitude') + key = make_dataid(name='longitude') res = fh.get_dataset(key, info) assert(np.all(res == 0)) - key = DatasetID(name='latitude') + key = make_dataid(name='latitude') res = fh.get_dataset(key, info) assert(np.all(res == 0)) diff --git a/satpy/tests/reader_tests/test_abi_l1b.py b/satpy/tests/reader_tests/test_abi_l1b.py index f0b92e36dd..8e8d606e0f 100644 --- a/satpy/tests/reader_tests/test_abi_l1b.py +++ b/satpy/tests/reader_tests/test_abi_l1b.py @@ -17,11 +17,14 @@ # satpy. If not, see . 
"""The abi_l1b reader tests package.""" -import numpy as np -import xarray as xr import unittest from unittest import mock +import numpy as np +import xarray as xr + +from satpy.tests.utils import make_dataid + class Test_NC_ABI_L1B_Base(unittest.TestCase): """Common setup for NC_ABI_L1B tests.""" @@ -124,8 +127,7 @@ def test_basic_attributes(self): def test_get_dataset(self): """Test the get_dataset method.""" - from satpy import DatasetID - key = DatasetID(name='Rad', calibration='radiance') + key = make_dataid(name='Rad', calibration='radiance') res = self.reader.get_dataset(key, {'info': 'info'}) exp = {'calibration': 'radiance', 'instrument_ID': None, @@ -157,12 +159,6 @@ def test_get_dataset(self): self.assertNotIn('time', res.coords) self.assertNotIn('time', res.dims) - def test_bad_calibration(self): - """Test that asking for a bad calibration fails.""" - from satpy import DatasetID - self.assertRaises(ValueError, self.reader.get_dataset, - DatasetID(name='C05', calibration='_bad_'), {}) - @mock.patch('satpy.readers.abi_base.geometry.AreaDefinition') def test_get_area_def(self, adef): """Test the area generation.""" @@ -198,9 +194,8 @@ def setUp(self): def test_ir_calibrate(self): """Test IR calibration.""" - from satpy import DatasetID res = self.reader.get_dataset( - DatasetID(name='C05', calibration='brightness_temperature'), {}) + make_dataid(name='C05', calibration='brightness_temperature'), {}) expected = np.array([[267.55572248, 305.15576503, 332.37383249, 354.73895301, 374.19710115], [391.68679226, 407.74064808, 422.69329105, 436.77021913, np.nan]]) @@ -234,9 +229,8 @@ def setUp(self): def test_vis_calibrate(self): """Test VIS calibration.""" - from satpy import DatasetID res = self.reader.get_dataset( - DatasetID(name='C05', calibration='reflectance'), {}) + make_dataid(name='C05', calibration='reflectance'), {}) expected = np.array([[0.15265617, 0.30531234, 0.45796851, 0.61062468, 0.76328085], [0.91593702, 1.06859319, 1.22124936, np.nan, 1.52656171]]) diff --git a/satpy/tests/reader_tests/test_abi_l2_nc.py b/satpy/tests/reader_tests/test_abi_l2_nc.py index 021be31e3a..1c62e48604 100644 --- a/satpy/tests/reader_tests/test_abi_l2_nc.py +++ b/satpy/tests/reader_tests/test_abi_l2_nc.py @@ -88,8 +88,8 @@ class Test_NC_ABI_L2_get_dataset(Test_NC_ABI_L2_base): def test_get_dataset(self): """Test basic L2 load.""" - from satpy import DatasetID - key = DatasetID(name='HT') + from satpy.tests.utils import make_dataid + key = make_dataid(name='HT') res = self.reader.get_dataset(key, {'file_key': 'HT'}) exp_data = np.array([[2 * 0.3052037, np.nan], diff --git a/satpy/tests/reader_tests/test_agri_l1.py b/satpy/tests/reader_tests/test_agri_l1.py index 67c0e1c447..4083b49ed1 100644 --- a/satpy/tests/reader_tests/test_agri_l1.py +++ b/satpy/tests/reader_tests/test_agri_l1.py @@ -17,19 +17,23 @@ # satpy. If not, see . """The agri_l1 reader tests package.""" -from satpy.tests.reader_tests.test_hdf5_utils import FakeHDF5FileHandler -import numpy as np -import dask.array as da -import xarray as xr import os import unittest from unittest import mock +import dask.array as da +import numpy as np +import pytest +import xarray as xr + +from satpy.tests.reader_tests.test_hdf5_utils import FakeHDF5FileHandler + class FakeHDF5FileHandler2(FakeHDF5FileHandler): """Swap-in HDF5 File Handler.""" def make_test_data(self, cwl, ch, prefix, dims, file_type): + """Make test data.""" if prefix == 'CAL': data = xr.DataArray( da.from_array((np.arange(10.) + 1.) 
/ 10., [dims[0] * dims[1]]), @@ -123,7 +127,7 @@ def _get_500m_data(self, file_type): chs = [2] cwls = [0.65] data = {} - for index, cwl in enumerate(cwls): + for index, _cwl in enumerate(cwls): data['CALChannel' + '%02d' % chs[index]] = self.make_test_data(cwls[index], chs[index], 'CAL', [dim_0, dim_1], file_type) data['NOMChannel' + '%02d' % chs[index]] = self.make_test_data(cwls[index], chs[index], 'NOM', @@ -139,7 +143,7 @@ def _get_1km_data(self, file_type): chs = np.linspace(1, 3, 3) cwls = [0.47, 0.65, 0.83] data = {} - for index, cwl in enumerate(cwls): + for index, _cwl in enumerate(cwls): data['CALChannel' + '%02d' % chs[index]] = self.make_test_data(cwls[index], chs[index], 'CAL', [dim_0, dim_1], file_type) data['NOMChannel' + '%02d' % chs[index]] = self.make_test_data(cwls[index], chs[index], 'NOM', @@ -155,7 +159,7 @@ def _get_2km_data(self, file_type): chs = np.linspace(1, 7, 7) cwls = [0.47, 0.65, 0.83, 1.37, 1.61, 2.22, 3.72] data = {} - for index, cwl in enumerate(cwls): + for index, _cwl in enumerate(cwls): data['CALChannel' + '%02d' % chs[index]] = self.make_test_data(cwls[index], chs[index], 'CAL', [dim_0, dim_1], file_type) data['NOMChannel' + '%02d' % chs[index]] = self.make_test_data(cwls[index], chs[index], 'NOM', @@ -171,7 +175,7 @@ def _get_4km_data(self, file_type): chs = np.linspace(1, 14, 14) cwls = [0.47, 0.65, 0.83, 1.37, 1.61, 2.22, 3.72, 3.72, 6.25, 7.10, 8.50, 10.8, 12, 13.5] data = {} - for index, cwl in enumerate(cwls): + for index, _cwl in enumerate(cwls): data['CALChannel' + '%02d' % chs[index]] = self.make_test_data(cwls[index], chs[index], 'CAL', [dim_0, dim_1], file_type) data['NOMChannel' + '%02d' % chs[index]] = self.make_test_data(cwls[index], chs[index], 'NOM', @@ -218,6 +222,7 @@ def get_test_content(self, filename, filename_info, filetype_info): class Test_HDF_AGRI_L1_cal(unittest.TestCase): """Test VIRR L1B Reader.""" + yaml_file = "agri_l1.yaml" def setUp(self): @@ -236,7 +241,7 @@ def tearDown(self): def test_fy4a_all_resolutions(self): """Test loading data when all resolutions are available.""" - from satpy import DatasetID + from satpy.tests.utils import make_dsq from satpy.readers import load_reader, get_key filenames = [ 'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_0500M_V0001.HDF', @@ -256,22 +261,22 @@ def test_fy4a_all_resolutions(self): # 500m band_names = ['C' + '%02d' % ch for ch in np.linspace(2, 2, 1)] for band_name in band_names: - ds_id = DatasetID(name=band_name, resolution=500) - res = get_key(ds_id, available_datasets, num_results=0, best=False) + ds_q = make_dsq(name=band_name, resolution=500) + res = get_key(ds_q, available_datasets, num_results=0, best=False) self.assertEqual(2, len(res)) # 1km band_names = ['C' + '%02d' % ch for ch in np.linspace(1, 3, 3)] for band_name in band_names: - ds_id = DatasetID(name=band_name, resolution=1000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) + ds_q = make_dsq(name=band_name, resolution=1000) + res = get_key(ds_q, available_datasets, num_results=0, best=False) self.assertEqual(2, len(res)) # 2km band_names = ['C' + '%02d' % ch for ch in np.linspace(1, 7, 7)] for band_name in band_names: - ds_id = DatasetID(name=band_name, resolution=2000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) + ds_q = make_dsq(name=band_name, resolution=2000) + res = get_key(ds_q, available_datasets, num_results=0, best=False) if band_name < 'C07': self.assertEqual(2, len(res)) else: @@ -302,7 +307,7 @@ def 
test_fy4a_all_resolutions(self): def test_fy4a_counts_calib(self): """Test loading data at counts calibration.""" - from satpy import DatasetID + from satpy.tests.utils import make_dsq from satpy.readers import load_reader filenames = [ 'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_0500M_V0001.HDF', @@ -320,7 +325,7 @@ def test_fy4a_counts_calib(self): ds_ids = [] band_names = ['C' + '%02d' % ch for ch in np.linspace(1, 14, 14)] for band_name in band_names: - ds_ids.append(DatasetID(name=band_name, calibration='counts')) + ds_ids.append(make_dsq(name=band_name, calibration='counts')) res = reader.load(ds_ids) self.assertEqual(14, len(res)) @@ -332,7 +337,7 @@ def test_fy4a_counts_calib(self): def test_fy4a_4km_resolutions(self): """Test loading data when only 4km resolutions are available.""" - from satpy import DatasetID + from satpy.tests.utils import make_dsq from satpy.readers import load_reader, get_key filenames = [ 'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_4000M_V0001.HDF', @@ -347,19 +352,14 @@ def test_fy4a_4km_resolutions(self): # Verify that the resolution is only 4km available_datasets = reader.available_dataset_ids band_names = ['C' + '%02d' % ch for ch in np.linspace(1, 14, 14)] - for band_name in band_names: - ds_id = DatasetID(name=band_name, resolution=500) - res = get_key(ds_id, available_datasets, num_results=0, best=False) - self.assertEqual(0, len(res)) - ds_id = DatasetID(name=band_name, resolution=1000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) - self.assertEqual(0, len(res)) - ds_id = DatasetID(name=band_name, resolution=2000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) - self.assertEqual(0, len(res)) - ds_id = DatasetID(name=band_name, resolution=4000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) + for resolution in [500, 1000, 2000]: + ds_q = make_dsq(name=band_name, resolution=resolution) + with pytest.raises(KeyError): + res = get_key(ds_q, available_datasets, num_results=0, best=False) + + ds_q = make_dsq(name=band_name, resolution=4000) + res = get_key(ds_q, available_datasets, num_results=0, best=False) if band_name < 'C07': self.assertEqual(2, len(res)) else: @@ -392,7 +392,7 @@ def test_fy4a_4km_resolutions(self): def test_fy4a_2km_resolutions(self): """Test loading data when only 2km resolutions are available.""" - from satpy import DatasetID + from satpy.tests.utils import make_dsq from satpy.readers import load_reader, get_key filenames = [ 'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_2000M_V0001.HDF', @@ -409,21 +409,17 @@ def test_fy4a_2km_resolutions(self): band_names = ['C' + '%02d' % ch for ch in np.linspace(1, 7, 7)] for band_name in band_names: - ds_id = DatasetID(name=band_name, resolution=500) - res = get_key(ds_id, available_datasets, num_results=0, best=False) - self.assertEqual(0, len(res)) - ds_id = DatasetID(name=band_name, resolution=1000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) - self.assertEqual(0, len(res)) - ds_id = DatasetID(name=band_name, resolution=2000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) + for resolution in [500, 1000, 4000]: + ds_q = make_dsq(name=band_name, resolution=resolution) + with pytest.raises(KeyError): + res = get_key(ds_q, available_datasets, num_results=0, best=False) + + ds_q = make_dsq(name=band_name, resolution=2000) + res = get_key(ds_q, available_datasets, num_results=0, 
best=False) if band_name < 'C07': self.assertEqual(2, len(res)) else: self.assertEqual(3, len(res)) - ds_id = DatasetID(name=band_name, resolution=4000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) - self.assertEqual(0, len(res)) res = reader.load(band_names) self.assertEqual(7, len(res)) @@ -451,7 +447,7 @@ def test_fy4a_2km_resolutions(self): def test_fy4a_1km_resolutions(self): """Test loading data when only 1km resolutions are available.""" - from satpy import DatasetID + from satpy.tests.utils import make_dsq from satpy.readers import load_reader, get_key filenames = [ 'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_1000M_V0001.HDF', @@ -468,18 +464,14 @@ def test_fy4a_1km_resolutions(self): band_names = ['C' + '%02d' % ch for ch in np.linspace(1, 3, 3)] for band_name in band_names: - ds_id = DatasetID(name=band_name, resolution=500) - res = get_key(ds_id, available_datasets, num_results=0, best=False) - self.assertEqual(0, len(res)) - ds_id = DatasetID(name=band_name, resolution=1000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) + for resolution in [500, 2000, 4000]: + ds_q = make_dsq(name=band_name, resolution=resolution) + with pytest.raises(KeyError): + res = get_key(ds_q, available_datasets, num_results=0, best=False) + + ds_q = make_dsq(name=band_name, resolution=1000) + res = get_key(ds_q, available_datasets, num_results=0, best=False) self.assertEqual(2, len(res)) - ds_id = DatasetID(name=band_name, resolution=2000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) - self.assertEqual(0, len(res)) - ds_id = DatasetID(name=band_name, resolution=4000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) - self.assertEqual(0, len(res)) res = reader.load(band_names) self.assertEqual(3, len(res)) @@ -498,7 +490,7 @@ def test_fy4a_1km_resolutions(self): def test_fy4a_500m_resolutions(self): """Test loading data when only 500m resolutions are available.""" - from satpy import DatasetID + from satpy.tests.utils import make_dsq from satpy.readers import load_reader, get_key filenames = [ 'FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20190603003000_20190603003416_0500M_V0001.HDF', @@ -515,18 +507,14 @@ def test_fy4a_500m_resolutions(self): band_names = ['C' + '%02d' % ch for ch in np.linspace(2, 2, 1)] for band_name in band_names: - ds_id = DatasetID(name=band_name, resolution=500) - res = get_key(ds_id, available_datasets, num_results=0, best=False) + for resolution in [1000, 2000, 4000]: + ds_q = make_dsq(name=band_name, resolution=resolution) + with pytest.raises(KeyError): + res = get_key(ds_q, available_datasets, num_results=0, best=False) + + ds_q = make_dsq(name=band_name, resolution=500) + res = get_key(ds_q, available_datasets, num_results=0, best=False) self.assertEqual(2, len(res)) - ds_id = DatasetID(name=band_name, resolution=1000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) - self.assertEqual(0, len(res)) - ds_id = DatasetID(name=band_name, resolution=2000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) - self.assertEqual(0, len(res)) - ds_id = DatasetID(name=band_name, resolution=4000) - res = get_key(ds_id, available_datasets, num_results=0, best=False) - self.assertEqual(0, len(res)) res = reader.load(band_names) self.assertEqual(1, len(res)) diff --git a/satpy/tests/reader_tests/test_ahi_hrit.py b/satpy/tests/reader_tests/test_ahi_hrit.py index ccc440ffa7..3ae3d26c7e 100644 --- 
a/satpy/tests/reader_tests/test_ahi_hrit.py +++ b/satpy/tests/reader_tests/test_ahi_hrit.py @@ -302,7 +302,7 @@ def test_get_dataset(self, base_get_dataset): log_mock.assert_called() def test_mjd2datetime64(self): - """Test conversion from modified julian day to datetime64""" + """Test conversion from modified julian day to datetime64.""" from satpy.readers.hrit_jma import mjd2datetime64 self.assertEqual(mjd2datetime64(np.array([0])), np.datetime64('1858-11-17', 'us')) diff --git a/satpy/tests/reader_tests/test_ahi_hsd.py b/satpy/tests/reader_tests/test_ahi_hsd.py index bb6ae6ec2a..5166b18492 100644 --- a/satpy/tests/reader_tests/test_ahi_hsd.py +++ b/satpy/tests/reader_tests/test_ahi_hsd.py @@ -200,6 +200,7 @@ def test_time_properties(self): self.assertEqual(self.fh.scheduled_time, datetime(2018, 10, 22, 3, 0, 0, 0)) def test_scanning_frequencies(self): + """Test scanning frequencies.""" self.fh.observation_area = 'JP04' self.assertEqual(self.fh.scheduled_time, datetime(2018, 10, 22, 3, 7, 30, 0)) self.fh.observation_area = 'R304' diff --git a/satpy/tests/reader_tests/test_ami_l1b.py b/satpy/tests/reader_tests/test_ami_l1b.py index 1f0e05cfa1..501689c5a0 100644 --- a/satpy/tests/reader_tests/test_ami_l1b.py +++ b/satpy/tests/reader_tests/test_ami_l1b.py @@ -178,8 +178,8 @@ def test_basic_attributes(self): def test_get_dataset(self): """Test gettting radiance data.""" - from satpy import DatasetID - key = DatasetID(name='VI006', calibration='radiance') + from satpy.tests.utils import make_dataid + key = make_dataid(name='VI006', calibration='radiance') res = self.reader.get_dataset(key, { 'file_key': 'image_pixel_values', 'standard_name': 'toa_outgoing_radiance_per_unit_wavelength', @@ -194,16 +194,6 @@ def test_get_dataset(self): self.assertEqual(val, res.attrs[key]) self._check_orbital_parameters(res.attrs['orbital_parameters']) - def test_bad_calibration(self): - """Test that asking for a bad calibration fails.""" - from satpy import DatasetID - self.assertRaises(ValueError, self.reader.get_dataset, - DatasetID(name='VI006', calibration='_bad_'), - {'file_key': 'image_pixel_values', - 'standard_name': 'toa_outgoing_radiance_per_unit_wavelength', - 'units': 'W m-2 um-1 sr-1', - }) - @mock.patch('satpy.readers.abi_base.geometry.AreaDefinition') def test_get_area_def(self, adef): """Test the area generation.""" @@ -223,8 +213,8 @@ def test_get_area_def(self, adef): def test_get_dataset_vis(self): """Test get visible calibrated data.""" - from satpy import DatasetID - key = DatasetID(name='VI006', calibration='reflectance') + from satpy.tests.utils import make_dataid + key = make_dataid(name='VI006', calibration='reflectance') res = self.reader.get_dataset(key, { 'file_key': 'image_pixel_values', 'standard_name': 'toa_bidirectional_reflectance', @@ -241,8 +231,8 @@ def test_get_dataset_vis(self): def test_get_dataset_counts(self): """Test get counts data.""" - from satpy import DatasetID - key = DatasetID(name='VI006', calibration='counts') + from satpy.tests.utils import make_dataid + key = make_dataid(name='VI006', calibration='counts') res = self.reader.get_dataset(key, { 'file_key': 'image_pixel_values', 'standard_name': 'counts', @@ -290,10 +280,10 @@ def setUp(self): def test_ir_calibrate(self): """Test IR calibration.""" - from satpy import DatasetID + from satpy.tests.utils import make_dataid from satpy.readers.ami_l1b import rad2temp - ds_id = DatasetID(name='IR087', wavelength=[8.415, 8.59, 8.765], - calibration='brightness_temperature') + ds_id = make_dataid(name='IR087', 
wavelength=[8.415, 8.59, 8.765], + calibration='brightness_temperature') ds_info = { 'file_key': 'image_pixel_values', 'wavelength': [8.415, 8.59, 8.765], diff --git a/satpy/tests/reader_tests/test_amsr2_l2.py b/satpy/tests/reader_tests/test_amsr2_l2.py index af02960401..70c667cb83 100644 --- a/satpy/tests/reader_tests/test_amsr2_l2.py +++ b/satpy/tests/reader_tests/test_amsr2_l2.py @@ -15,7 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""Unit tests for AMSR L2 reader""" +"""Unit tests for AMSR L2 reader.""" import os import unittest @@ -36,9 +36,10 @@ class FakeHDF5FileHandler2(FakeHDF5FileHandler): - """Swap-in HDF5 File Handler""" + """Swap-in HDF5 File Handler.""" + def get_test_content(self, filename, filename_info, filetype_info): - """Mimic reader input file content""" + """Mimic reader input file content.""" file_content = { '/attr/PlatformShortName': 'GCOM-W1', '/attr/SensorShortName': 'AMSR2', @@ -67,11 +68,12 @@ def get_test_content(self, filename, filename_info, filetype_info): class TestAMSR2L2Reader(unittest.TestCase): - """Test AMSR2 L2 Reader""" + """Test AMSR2 L2 Reader.""" + yaml_file = "amsr2_l2.yaml" def setUp(self): - """Wrap HDF5 file handler with our own fake handler""" + """Wrap HDF5 file handler with our own fake handler.""" from satpy.config import config_search_paths from satpy.readers.amsr2_l2 import AMSR2L2FileHandler from satpy.readers.amsr2_l1b import AMSR2L1BFileHandler @@ -83,7 +85,7 @@ def setUp(self): self.p.is_local = True def tearDown(self): - """Stop wrapping the HDF5 file handler""" + """Stop wrapping the HDF5 file handler.""" self.p.stop() def test_init(self): @@ -99,7 +101,7 @@ def test_init(self): self.assertTrue(r.file_handlers) def test_load_basic(self): - """Test loading of basic channels""" + """Test loading of basic channels.""" from satpy.readers import load_reader r = load_reader(self.reader_configs) loadables = r.select_files_from_pathnames([ diff --git a/satpy/tests/reader_tests/test_avhrr_l1b_gaclac.py b/satpy/tests/reader_tests/test_avhrr_l1b_gaclac.py index ecbe2f46ff..87c1605714 100644 --- a/satpy/tests/reader_tests/test_avhrr_l1b_gaclac.py +++ b/satpy/tests/reader_tests/test_avhrr_l1b_gaclac.py @@ -18,12 +18,12 @@ """Pygac interface.""" from datetime import datetime +from unittest import TestCase, mock + import dask.array as da -from unittest import TestCase import numpy as np +import pytest import xarray as xr -from unittest import mock - GAC_PATTERN = '{creation_site:3s}.{transfer_mode:4s}.{platform_id:2s}.D{start_time:%y%j.S%H%M}.E{end_time:%H%M}.B{orbit_number:05d}{end_orbit_last_digits:02d}.{station:2s}' # noqa @@ -152,7 +152,8 @@ def test_init(self): @mock.patch('satpy.readers.avhrr_l1b_gaclac.GACLACFile.read_raw_data') @mock.patch('satpy.readers.avhrr_l1b_gaclac.GACLACFile._get_channel') def test_get_dataset_channels(self, get_channel, *mocks): - from satpy.dataset import DatasetID + """Test getting the channel datasets.""" + from satpy.tests.utils import make_dataid # Mock reader and file handler fh = self._get_fh_mocked( @@ -168,7 +169,7 @@ def test_get_dataset_channels(self, get_channel, *mocks): # Test calibration to reflectance as well as attributes. 
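The substitution running through this test file, and through most of the reader tests below, is the same one applied above: keys that used to be built with the positional `DatasetID` constructor are now built with the keyword-only helper `satpy.tests.utils.make_dataid`. A minimal sketch of the pattern, reusing the band name and calibration that appear in the surrounding hunks and assuming a satpy checkout that already ships the new helper::

    from satpy.tests.utils import make_dataid

    # before: DatasetID('1', calibration='reflectance')
    # after: every identifying property is passed explicitly by keyword
    key = make_dataid(name='1', calibration='reflectance')

The resulting object is what these tests hand to the file handlers' `get_dataset` methods.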
counts = np.ones((3, 3)) get_channel.return_value = counts - key = DatasetID('1', calibration='reflectance') + key = make_dataid(name='1', calibration='reflectance') info = {'name': '1', 'standard_name': 'my_standard_name'} res = fh.get_dataset(key=key, info=info) @@ -188,12 +189,13 @@ def test_get_dataset_channels(self, get_channel, *mocks): get_channel.assert_called_with(key) # Counts & brightness temperature: Similar, just check _get_channel() call - for key in [DatasetID('1', calibration='counts'), - DatasetID('5', calibration='brightness_temperature')]: + for key in [make_dataid(name='1', calibration='counts'), + make_dataid(name='5', calibration='brightness_temperature')]: fh.get_dataset(key=key, info={'name': 1}) get_channel.assert_called_with(key) def test_read_raw_data(self): + """Test raw data reading.""" fh = self._get_fh_mocked(reader=None, interpolate_coords='interpolate_coords', creation_site='creation_site', @@ -218,7 +220,8 @@ def test_read_raw_data(self): @mock.patch('satpy.readers.avhrr_l1b_gaclac.GACLACFile.slice') @mock.patch('satpy.readers.avhrr_l1b_gaclac.GACLACFile._get_channel') def test_get_dataset_slice(self, get_channel, slc, *mocks): - from satpy.dataset import DatasetID + """Get a slice of a dataset.""" + from satpy.tests.utils import make_dataid # Test slicing/stripping def slice_patched(data, times): @@ -247,7 +250,7 @@ def slice_patched(data, times): **kwargs ) - key = DatasetID('1', calibration='reflectance') + key = make_dataid(name='1', calibration='reflectance') info = {'name': '1', 'standard_name': 'reflectance'} res = fh.get_dataset(key, info) np.testing.assert_array_equal(res.data, ch[1:3, :]) @@ -257,7 +260,8 @@ def slice_patched(data, times): @mock.patch('satpy.readers.avhrr_l1b_gaclac.GACLACFile._update_attrs') def test_get_dataset_latlon(self, *mocks): - from satpy.dataset import DatasetID + """Test getting the latitudes and longitudes.""" + from satpy.tests.utils import make_dataid lons = np.ones((3, 3)) lats = 2 * lons @@ -273,7 +277,7 @@ def test_get_dataset_latlon(self, *mocks): # With interpolation of coordinates for name, exp_data in zip(['longitude', 'latitude'], [lons, lats]): - key = DatasetID(name) + key = make_dataid(name=name) info = {'name': name, 'standard_name': 'my_standard_name'} res = fh.get_dataset(key=key, info=info) exp = xr.DataArray(exp_data, @@ -284,8 +288,8 @@ def test_get_dataset_latlon(self, *mocks): # Without interpolation of coordinates fh.interpolate_coords = False - for name, exp_data in zip(['longitude', 'latitude'], [lons, lats]): - key = DatasetID(name) + for name, _exp_data in zip(['longitude', 'latitude'], [lons, lats]): + key = make_dataid(name=name) info = {'name': name, 'standard_name': 'my_standard_name'} res = fh.get_dataset(key=key, info=info) self.assertTupleEqual(res.dims, ('y', 'x_every_eighth')) @@ -293,7 +297,8 @@ def test_get_dataset_latlon(self, *mocks): @mock.patch('satpy.readers.avhrr_l1b_gaclac.GACLACFile._update_attrs') @mock.patch('satpy.readers.avhrr_l1b_gaclac.GACLACFile._get_angle') def test_get_dataset_angles(self, get_angle, *mocks): - from satpy.dataset import DatasetID + """Test getting the angles.""" + from satpy.tests.utils import make_dataid from satpy.readers.avhrr_l1b_gaclac import ANGLES ones = np.ones((3, 3)) @@ -309,7 +314,7 @@ def test_get_dataset_angles(self, get_angle, *mocks): # With interpolation of coordinates for angle in ANGLES: - key = DatasetID(angle) + key = make_dataid(name=angle) info = {'name': angle, 'standard_name': 'my_standard_name'} res = 
fh.get_dataset(key=key, info=info) exp = xr.DataArray(ones, @@ -321,14 +326,15 @@ def test_get_dataset_angles(self, get_angle, *mocks): # Without interpolation of coordinates fh.interpolate_coords = False for angle in ANGLES: - key = DatasetID(angle) + key = make_dataid(name=angle) info = {'name': angle, 'standard_name': 'my_standard_name'} res = fh.get_dataset(key=key, info=info) self.assertTupleEqual(res.dims, ('y', 'x_every_eighth')) @mock.patch('satpy.readers.avhrr_l1b_gaclac.GACLACFile._update_attrs') def test_get_dataset_qual_flags(self, *mocks): - from satpy.dataset import DatasetID + """Test getting the quality flags.""" + from satpy.tests.utils import make_dataid qual_flags = np.ones((3, 7)) reader = self._get_reader_mocked() @@ -341,7 +347,7 @@ def test_get_dataset_qual_flags(self, *mocks): interpolate_coords=True ) - key = DatasetID('qual_flags') + key = make_dataid(name='qual_flags') info = {'name': 'qual_flags'} res = fh.get_dataset(key=key, info=info) exp = xr.DataArray(qual_flags, @@ -358,7 +364,8 @@ def test_get_dataset_qual_flags(self, *mocks): xr.testing.assert_equal(res, exp) def test_get_channel(self): - from satpy.dataset import DatasetID + """Test getting the channels.""" + from satpy.tests.utils import make_dataid counts = np.moveaxis(np.array([[[1, 2, 3], [4, 5, 6]]]), 0, 2) @@ -369,7 +376,7 @@ def test_get_channel(self): fh = self._get_fh_mocked(reader=reader, counts=None, calib_channels=None, chn_dict={'1': 0}) - key = DatasetID('1', calibration='counts') + key = make_dataid(name='1', calibration='counts') # Counts res = fh._get_channel(key=key) np.testing.assert_array_equal(res, [[1, 2, 3], @@ -378,38 +385,38 @@ def test_get_channel(self): # Reflectance and Brightness Temperature for calib in ['reflectance', 'brightness_temperature']: - key = DatasetID('1', calibration=calib) + key = make_dataid(name='1', calibration=calib) res = fh._get_channel(key=key) np.testing.assert_array_equal(res, [[2, 4, 6], [8, 10, 12]]) np.testing.assert_array_equal(fh.calib_channels, calib_channels) # Invalid - key = DatasetID('7', calibration='coffee') - self.assertRaises(ValueError, fh._get_channel, key=key) + with pytest.raises(ValueError): + key = make_dataid(name='7', calibration='coffee') # Buffering reader.get_counts.reset_mock() - key = DatasetID('1', calibration='counts') + key = make_dataid(name='1', calibration='counts') fh._get_channel(key=key) reader.get_counts.assert_not_called() reader.get_calibrated_channels.reset_mock() for calib in ['reflectance', 'brightness_temperature']: - key = DatasetID('1', calibration=calib) + key = make_dataid(name='1', calibration=calib) fh._get_channel(key) reader.get_calibrated_channels.assert_not_called() def test_get_angle(self): """Test getting the angle.""" - from satpy.dataset import DatasetID + from satpy.tests.utils import make_dataid reader = mock.MagicMock() reader.get_angles.return_value = 1, 2, 3, 4, 5 fh = self._get_fh_mocked(reader=reader, angles=None) # Test angle readout - key = DatasetID('sensor_zenith_angle') + key = make_dataid(name='sensor_zenith_angle') res = fh._get_angle(key) self.assertEqual(res, 2) self.assertDictEqual(fh.angles, {'sensor_zenith_angle': 2, @@ -419,7 +426,7 @@ def test_get_angle(self): 'sun_sensor_azimuth_difference_angle': 5}) # Test buffering - key = DatasetID('sensor_azimuth_angle') + key = make_dataid(name='sensor_azimuth_angle') fh._get_angle(key) reader.get_angles.assert_called_once() diff --git a/satpy/tests/reader_tests/test_clavrx.py b/satpy/tests/reader_tests/test_clavrx.py index
cca8cb61c3..7542fccc8b 100644 --- a/satpy/tests/reader_tests/test_clavrx.py +++ b/satpy/tests/reader_tests/test_clavrx.py @@ -39,9 +39,10 @@ class FakeHDF4FileHandlerPolar(FakeHDF4FileHandler): - """Swap-in HDF4 File Handler""" + """Swap-in HDF4 File Handler.""" + def get_test_content(self, filename, filename_info, filetype_info): - """Mimic reader input file content""" + """Mimic reader input file content.""" file_content = { '/attr/platform': 'SNPP', '/attr/sensor': 'VIIRS', @@ -106,10 +107,11 @@ def get_test_content(self, filename, filename_info, filetype_info): class TestCLAVRXReaderPolar(unittest.TestCase): """Test CLAVR-X Reader with Polar files.""" + yaml_file = "clavrx.yaml" def setUp(self): - """Wrap HDF4 file handler with our own fake handler""" + """Wrap HDF4 file handler with our own fake handler.""" from satpy.config import config_search_paths from satpy.readers.clavrx import CLAVRXFileHandler self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file)) @@ -119,7 +121,7 @@ def setUp(self): self.p.is_local = True def tearDown(self): - """Stop wrapping the NetCDF4 file handler""" + """Stop wrapping the NetCDF4 file handler.""" self.p.stop() def test_init(self): @@ -201,7 +203,7 @@ def test_available_datasets(self): self.assertEqual(new_ds_infos[8][1]['resolution'], 742) def test_load_all(self): - """Test loading all test datasets""" + """Test loading all test datasets.""" from satpy.readers import load_reader import xarray as xr r = load_reader(self.reader_configs) @@ -215,15 +217,16 @@ def test_load_all(self): 'variable3']) self.assertEqual(len(datasets), 3) for v in datasets.values(): - self.assertIs(v.attrs['calibration'], None) + assert 'calibration' not in v.attrs self.assertEqual(v.attrs['units'], '1') self.assertIsNotNone(datasets['variable3'].attrs.get('flag_meanings')) class FakeHDF4FileHandlerGeo(FakeHDF4FileHandler): - """Swap-in HDF4 File Handler""" + """Swap-in HDF4 File Handler.""" + def get_test_content(self, filename, filename_info, filetype_info): - """Mimic reader input file content""" + """Mimic reader input file content.""" file_content = { '/attr/platform': 'HIM8', '/attr/sensor': 'AHI', @@ -296,10 +299,11 @@ def get_test_content(self, filename, filename_info, filetype_info): class TestCLAVRXReaderGeo(unittest.TestCase): """Test CLAVR-X Reader with Geo files.""" + yaml_file = "clavrx.yaml" def setUp(self): - """Wrap HDF4 file handler with our own fake handler""" + """Wrap HDF4 file handler with our own fake handler.""" from satpy.config import config_search_paths from satpy.readers.clavrx import CLAVRXFileHandler self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file)) @@ -309,7 +313,7 @@ def setUp(self): self.p.is_local = True def tearDown(self): - """Stop wrapping the NetCDF4 file handler""" + """Stop wrapping the NetCDF4 file handler.""" self.p.stop() def test_init(self): @@ -364,7 +368,7 @@ def test_load_all_old_donor(self): datasets = r.load(['variable1', 'variable2', 'variable3']) self.assertEqual(len(datasets), 3) for v in datasets.values(): - self.assertIs(v.attrs['calibration'], None) + assert 'calibration' not in v.attrs self.assertEqual(v.attrs['units'], '1') self.assertIsInstance(v.attrs['area'], AreaDefinition) self.assertIsNotNone(datasets['variable3'].attrs.get('flag_meanings')) @@ -397,7 +401,7 @@ def test_load_all_new_donor(self): datasets = r.load(['variable1', 'variable2', 'variable3']) self.assertEqual(len(datasets), 3) for v in datasets.values(): - self.assertIs(v.attrs['calibration'], 
None) + assert 'calibration' not in v.attrs self.assertEqual(v.attrs['units'], '1') self.assertIsInstance(v.attrs['area'], AreaDefinition) self.assertIsNotNone(datasets['variable3'].attrs.get('flag_meanings')) diff --git a/satpy/tests/reader_tests/test_cmsaf_claas.py b/satpy/tests/reader_tests/test_cmsaf_claas.py index b8933efb37..b1d9d5ba70 100644 --- a/satpy/tests/reader_tests/test_cmsaf_claas.py +++ b/satpy/tests/reader_tests/test_cmsaf_claas.py @@ -34,6 +34,7 @@ class FakeNetCDF4FileHandler2(FakeNetCDF4FileHandler): _ncols = 40 def __init__(self, *args, auto_maskandscale, **kwargs): + """Init the file handler.""" # make sure that CLAAS2 reader asks NetCDF4FileHandler for having # auto_maskandscale enabled assert auto_maskandscale @@ -128,7 +129,6 @@ def fake_handler(): def test_file_pattern(reader): """Test file pattern matching.""" - filenames = [ "CTXin20040120091500305SVMSG01MD.nc", "CTXin20040120093000305SVMSG01MD.nc", @@ -142,7 +142,7 @@ def test_file_pattern(reader): def test_load(reader): """Test loading.""" - from satpy import DatasetID + from satpy.tests.utils import make_dataid # testing two filenames to test correctly combined filenames = [ @@ -152,7 +152,7 @@ def test_load(reader): loadables = reader.select_files_from_pathnames(filenames) reader.create_filehandlers(loadables) res = reader.load( - [DatasetID(name=name) for name in ["cph", "ctt"]]) + [make_dataid(name=name) for name in ["cph", "ctt"]]) assert 2 == len(res) assert reader.start_time == datetime.datetime(1985, 8, 13, 13, 15) assert reader.end_time == datetime.datetime(2085, 8, 13, 13, 15) diff --git a/satpy/tests/reader_tests/test_electrol_hrit.py b/satpy/tests/reader_tests/test_electrol_hrit.py index a03c41d04e..a5c3d7c2c7 100644 --- a/satpy/tests/reader_tests/test_electrol_hrit.py +++ b/satpy/tests/reader_tests/test_electrol_hrit.py @@ -18,20 +18,19 @@ """The HRIT electrol reader tests package.""" import datetime -import numpy as np +import unittest +from unittest import mock + import dask.array as da +import numpy as np from xarray import DataArray -from satpy.readers.electrol_hrit import (recarray2dict, prologue, - HRITGOMSPrologueFileHandler, - HRITGOMSEpilogueFileHandler, +from satpy.readers.electrol_hrit import (HRITGOMSEpilogueFileHandler, HRITGOMSFileHandler, - satellite_status, - image_acquisition, - epilogue) - -import unittest -from unittest import mock + HRITGOMSPrologueFileHandler, epilogue, + image_acquisition, prologue, + recarray2dict, satellite_status) +from satpy.tests.utils import make_dataid # Simplify some type selections f64_t = np.float64 @@ -41,7 +40,9 @@ class Testrecarray2dict(unittest.TestCase): """Test the function that converts numpy record arrays into dicts for use within SatPy.""" + def test_fun(self): + """Test record array.""" inner_st = np.dtype([('test_str', '. 
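Two different helpers show up in this patch: `make_dsq`, used in the AGRI tests above when querying with `get_key`, and `make_dataid`, used when a concrete identifier for a single array is needed (for example as the key passed to `get_dataset`, or collected into a list for `reader.load`). The split sketched below is inferred from how the two helpers are used in these hunks; the channel name and resolution are only illustrative::

    from satpy.tests.utils import make_dataid, make_dsq

    # A query describes what to look for; keys that are not given stay open.
    query = make_dsq(name='C01', resolution=4000)

    # An ID pins down one concrete data array, e.g. as a get_dataset() key.
    data_id = make_dataid(name='C01', resolution=4000, calibration='reflectance')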
"""The hrit msg reader tests package.""" -import unittest import datetime -import numpy as np +import unittest from unittest import mock + +import numpy as np from xarray import DataArray -from satpy.readers.goes_imager_hrit import (make_gvar_float, make_sgs_time, - HRITGOESPrologueFileHandler, sgs_time, - HRITGOESFileHandler, ALTITUDE) + +from satpy.readers.goes_imager_hrit import (ALTITUDE, HRITGOESFileHandler, + HRITGOESPrologueFileHandler, + make_gvar_float, make_sgs_time, + sgs_time) +from satpy.tests.utils import make_dataid class TestGVARFloat(unittest.TestCase): + """GVAR float tester.""" + def test_fun(self): + """Test function.""" test_data = [(-1.0, b"\xbe\xf0\x00\x00"), (-0.1640625, b"\xbf\xd6\x00\x00"), (0.0, b"\x00\x00\x00\x00"), @@ -42,7 +49,10 @@ def test_fun(self): class TestMakeSGSTime(unittest.TestCase): + """SGS Time tester.""" + def test_fun(self): + """Encode the test time.""" # 2018-129 (may 9th), 21:33:27.999 tcds = np.array([(32, 24, 18, 146, 19, 50, 121, 153)], dtype=sgs_time) expected = datetime.datetime(2018, 5, 9, 21, 33, 27, 999000) @@ -115,7 +125,7 @@ class TestHRITGOESFileHandler(unittest.TestCase): @mock.patch('satpy.readers.goes_imager_hrit.HRITFileHandler.__init__') def setUp(self, new_fh_init): - """Setup the hrit file handler for testing.""" + """Set up the hrit file handler for testing.""" blob = '$HALFTONE:=10\r\n_NAME:=albedo\r\n_UNIT:=percent\r\n0:=0.0\r\n1023:=100.0\r\n'.encode() mda = {'projection_parameters': {'SSP_longitude': -123.0}, 'spectral_channel_id': 1, @@ -127,6 +137,7 @@ def setUp(self, new_fh_init): self.reader = HRITGOESFileHandler('filename', {}, {}, self.prologue) def test_init(self): + """Test the init.""" blob = '$HALFTONE:=10\r\n_NAME:=albedo\r\n_UNIT:=percent\r\n0:=0.0\r\n1023:=100.0\r\n'.encode() mda = {'spectral_channel_id': 1, 'projection_parameters': {'SSP_longitude': 100.1640625}, @@ -135,8 +146,8 @@ def test_init(self): @mock.patch('satpy.readers.goes_imager_hrit.HRITFileHandler.get_dataset') def test_get_dataset(self, base_get_dataset): - key = mock.MagicMock() - key.calibration = 'reflectance' + """Test get_dataset.""" + key = make_dataid(name="CH1", calibration='reflectance') base_get_dataset.return_value = DataArray(np.arange(25).reshape(5, 5)) res = self.reader.get_dataset(key, {}) expected = np.array([[np.nan, 0.097752, 0.195503, 0.293255, 0.391007], diff --git a/satpy/tests/reader_tests/test_goes_imager_nc.py b/satpy/tests/reader_tests/test_goes_imager_nc.py index ef0a85786d..17ae48d85a 100644 --- a/satpy/tests/reader_tests/test_goes_imager_nc.py +++ b/satpy/tests/reader_tests/test_goes_imager_nc.py @@ -15,16 +15,20 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . 
+"""Tests for the goes imager nc reader.""" import datetime import unittest from unittest import mock import numpy as np +import pytest import xarray as xr -from satpy import DatasetID + +from satpy.tests.utils import make_dataid class GOESNCBaseFileHandlerTest(unittest.TestCase): + """Testing the file handler.""" longMessage = True @@ -33,6 +37,7 @@ class GOESNCBaseFileHandlerTest(unittest.TestCase): __abstractmethods__=set(), _get_sector=mock.MagicMock()) def setUp(self, xr_): + """Set up the tests.""" from satpy.readers.goes_imager_nc import CALIB_COEFS, GOESNCBaseFileHandler self.coefs = CALIB_COEFS['GOES-15'] @@ -59,7 +64,7 @@ def setUp(self, xr_): filetype_info={}) def test_init(self): - """Tests reader initialization""" + """Tests reader initialization.""" self.assertEqual(self.reader.nlines, self.dummy2d.shape[0]) self.assertEqual(self.reader.ncols, self.dummy2d.shape[1]) self.assertEqual(self.reader.platform_name, 'GOES-15') @@ -68,7 +73,7 @@ def test_init(self): self.assertIsInstance(self.reader.geo_data, xr.Dataset) def test_get_nadir_pixel(self): - """Test identification of the nadir pixel""" + """Test identification of the nadir pixel.""" from satpy.readers.goes_imager_nc import FULL_DISC earth_mask = np.array([[0, 0, 0, 0], @@ -82,7 +87,7 @@ def test_get_nadir_pixel(self): msg='Incorrect nadir pixel') def test_get_earth_mask(self): - """Test identification of earth/space pixels""" + """Test identification of earth/space pixels.""" lat = xr.DataArray([-100, -90, -45, 0, 45, 90, 100]) expected = np.array([0, 1, 1, 1, 1, 1, 0]) mask = self.reader._get_earth_mask(lat) @@ -90,7 +95,7 @@ def test_get_earth_mask(self): msg='Incorrect identification of earth/space pixel') def test_is_yaw_flip(self): - """Test yaw flip identification""" + """Test yaw flip identification.""" lat_asc = xr.DataArray([[1, 1, 1], [2, 2, 2], [3, 3, 3]]) @@ -103,7 +108,7 @@ def test_is_yaw_flip(self): msg='Yaw flip false alarm') def test_viscounts2radiance(self): - """Test conversion from VIS counts to radiance""" + """Test conversion from VIS counts to radiance.""" # Reference data is for detector #1 slope = self.coefs['00_7']['slope'][0] offset = self.coefs['00_7']['offset'][0] @@ -118,7 +123,7 @@ def test_viscounts2radiance(self): 'radiance') def test_ircounts2radiance(self): - """Test conversion from IR counts to radiance""" + """Test conversion from IR counts to radiance.""" # Test counts counts = xr.DataArray([0, 100, 500, 1000, 1023]) @@ -144,7 +149,7 @@ def test_ircounts2radiance(self): 'radiance in channel {}'.format(ch)) def test_calibrate_vis(self): - """Test VIS calibration""" + """Test VIS calibration.""" rad = xr.DataArray([0, 1, 10, 100, 500]) refl_expected = xr.DataArray([0., 0.188852, 1.88852, 18.8852, 94.426]) refl = self.reader._calibrate_vis(radiance=rad, @@ -154,7 +159,7 @@ def test_calibrate_vis(self): 'reflectance') def test_calibrate_ir(self): - """Test IR calibration""" + """Test IR calibration.""" # Test radiance values and corresponding BT from NOAA lookup tables # rev. H (see [IR]). 
rad = { @@ -194,11 +199,11 @@ def test_calibrate_ir(self): 'temperature in channel {} detector {}'.format(ch, det)) def test_start_time(self): - """Test dataset start time stamp""" + """Test dataset start time stamp.""" self.assertEqual(self.reader.start_time, self.time) def test_end_time(self): - """Test dataset end time stamp""" + """Test dataset end time stamp.""" from satpy.readers.goes_imager_nc import (SCAN_DURATION, FULL_DISC, UNKNOWN_SECTOR) expected = { @@ -211,11 +216,13 @@ def test_end_time(self): class GOESNCFileHandlerTest(unittest.TestCase): + """Test the file handler.""" longMessage = True @mock.patch('satpy.readers.goes_imager_nc.xr') def setUp(self, xr_): + """Set up the tests.""" from satpy.readers.goes_imager_nc import GOESNCFileHandler, CALIB_COEFS self.coefs = CALIB_COEFS['GOES-15'] @@ -249,12 +256,10 @@ def setUp(self, xr_): filetype_info={}) def test_get_dataset_coords(self): - """Test whether coordinates returned by get_dataset() are correct""" - lon = self.reader.get_dataset(key=DatasetID(name='longitude', - calibration=None), + """Test whether coordinates returned by get_dataset() are correct.""" + lon = self.reader.get_dataset(key=make_dataid(name='longitude'), info={}) - lat = self.reader.get_dataset(key=DatasetID(name='latitude', - calibration=None), + lat = self.reader.get_dataset(key=make_dataid(name='latitude'), info={}) # ... this only compares the valid (unmasked) elements self.assertTrue(np.all(lat.to_masked_array() == self.lat), @@ -263,7 +268,7 @@ def test_get_dataset_coords(self): msg='get_dataset() returns invalid longitude') def test_get_dataset_counts(self): - """Test whether counts returned by get_dataset() are correct""" + """Test whether counts returned by get_dataset() are correct.""" from satpy.readers.goes_imager_nc import ALTITUDE, UNKNOWN_SECTOR self.reader.meta.update({'lon0': -75.0, @@ -288,7 +293,7 @@ def test_get_dataset_counts(self): for ch in self.channels: counts = self.reader.get_dataset( - key=DatasetID(name=ch, calibration='counts'), info={}) + key=make_dataid(name=ch, calibration='counts'), info={}) # ... this only compares the valid (unmasked) elements self.assertTrue(np.all(self.counts/32. == counts.to_masked_array()), msg='get_dataset() returns invalid counts for ' @@ -298,11 +303,10 @@ def test_get_dataset_counts(self): self.assertDictEqual(counts.attrs, attrs_exp) def test_get_dataset_masks(self): - """Test whether data and coordinates are masked consistently""" + """Test whether data and coordinates are masked consistently.""" # Requires that no element has been masked due to invalid # radiance/reflectance/BT (see setUp()). 
- lon = self.reader.get_dataset(key=DatasetID(name='longitude', - calibration=None), + lon = self.reader.get_dataset(key=make_dataid(name='longitude'), info={}) lon_mask = lon.to_masked_array().mask for ch in self.channels: @@ -310,7 +314,7 @@ def test_get_dataset_masks(self): 'brightness_temperature'): try: data = self.reader.get_dataset( - key=DatasetID(name=ch, calibration=calib), info={}) + key=make_dataid(name=ch, calibration=calib), info={}) except ValueError: continue data_mask = data.to_masked_array().mask @@ -319,27 +323,27 @@ def test_get_dataset_masks(self): 'masked {} in channel {}'.format(calib, ch)) def test_get_dataset_invalid(self): - """Test handling of invalid calibrations""" + """Test handling of invalid calibrations.""" # VIS -> BT - args = dict(key=DatasetID(name='00_7', - calibration='brightness_temperature'), + args = dict(key=make_dataid(name='00_7', + calibration='brightness_temperature'), info={}) self.assertRaises(ValueError, self.reader.get_dataset, **args) # IR -> Reflectance - args = dict(key=DatasetID(name='10_7', - calibration='reflectance'), + args = dict(key=make_dataid(name='10_7', + calibration='reflectance'), info={}) self.assertRaises(ValueError, self.reader.get_dataset, **args) # Unsupported calibration - args = dict(key=DatasetID(name='10_7', - calibration='invalid'), - info={}) - self.assertRaises(ValueError, self.reader.get_dataset, **args) + with pytest.raises(ValueError): + args = dict(key=make_dataid(name='10_7', + calibration='invalid'), + info={}) def test_calibrate(self): - """Test whether the correct calibration methods are called""" + """Test whether the correct calibration methods are called.""" for ch in self.channels: if self.reader._is_vis(ch): calibs = {'radiance': '_viscounts2radiance', @@ -354,7 +358,7 @@ def test_calibrate(self): target_func.assert_called() def test_get_sector(self): - """Test sector identification""" + """Test sector identification.""" from satpy.readers.goes_imager_nc import (FULL_DISC, NORTH_HEMIS_EAST, SOUTH_HEMIS_EAST, NORTH_HEMIS_WEST, SOUTH_HEMIS_WEST, UNKNOWN_SECTOR) @@ -388,10 +392,13 @@ def test_get_sector(self): class GOESNCEUMFileHandlerRadianceTest(unittest.TestCase): + """Tests for the radiances.""" + longMessage = True @mock.patch('satpy.readers.goes_imager_nc.xr') def setUp(self, xr_): + """Set up the tests.""" from satpy.readers.goes_imager_nc import GOESEUMNCFileHandler, CALIB_COEFS self.coefs = CALIB_COEFS['GOES-15'] @@ -426,17 +433,18 @@ def setUp(self, xr_): filetype_info={}, geo_data=geo_data) def test_get_dataset_radiance(self): + """Test getting the radiances.""" for ch in self.channels: if not self.reader._is_vis(ch): radiance = self.reader.get_dataset( - key=DatasetID(name=ch, calibration='radiance'), info={}) + key=make_dataid(name=ch, calibration='radiance'), info={}) # ... 
this only compares the valid (unmasked) elements self.assertTrue(np.all(self.radiance == radiance.to_masked_array()), msg='get_dataset() returns invalid radiance for ' 'channel {}'.format(ch)) def test_calibrate(self): - """Test whether the correct calibration methods are called""" + """Test whether the correct calibration methods are called.""" for ch in self.channels: if not self.reader._is_vis(ch): calibs = {'brightness_temperature': '_calibrate_ir'} @@ -447,7 +455,7 @@ def test_calibrate(self): target_func.assert_called() def test_get_sector(self): - """Test sector identification""" + """Test sector identification.""" from satpy.readers.goes_imager_nc import (FULL_DISC, NORTH_HEMIS_EAST, SOUTH_HEMIS_EAST, NORTH_HEMIS_WEST, SOUTH_HEMIS_WEST, UNKNOWN_SECTOR) @@ -468,10 +476,13 @@ def test_get_sector(self): class GOESNCEUMFileHandlerReflectanceTest(unittest.TestCase): + """Testing the reflectances.""" + longMessage = True @mock.patch('satpy.readers.goes_imager_nc.xr') def setUp(self, xr_): + """Set up the tests.""" from satpy.readers.goes_imager_nc import GOESEUMNCFileHandler, CALIB_COEFS self.coefs = CALIB_COEFS['GOES-15'] @@ -506,10 +517,11 @@ def setUp(self, xr_): filetype_info={}, geo_data=geo_data) def test_get_dataset_reflectance(self): + """Test getting the reflectance.""" for ch in self.channels: if self.reader._is_vis(ch): refl = self.reader.get_dataset( - key=DatasetID(name=ch, calibration='reflectance'), info={}) + key=make_dataid(name=ch, calibration='reflectance'), info={}) # ... this only compares the valid (unmasked) elements self.assertTrue(np.all(self.reflectance == refl.to_masked_array()), msg='get_dataset() returns invalid reflectance for ' diff --git a/satpy/tests/reader_tests/test_grib.py b/satpy/tests/reader_tests/test_grib.py index f87ab55c49..5ea7a1e388 100644 --- a/satpy/tests/reader_tests/test_grib.py +++ b/satpy/tests/reader_tests/test_grib.py @@ -19,16 +19,20 @@ import os import sys -import numpy as np -import xarray as xr import unittest from unittest import mock +import numpy as np +import xarray as xr + +from satpy.dataset import DataQuery + class FakeMessage(object): """Fake message returned by pygrib.open().message(x).""" def __init__(self, values, proj_params=None, latlons=None, **attrs): + """Init the message.""" super(FakeMessage, self).__init__() self.attrs = attrs self.values = values @@ -38,12 +42,15 @@ def __init__(self, values, proj_params=None, latlons=None, **attrs): self._latlons = latlons def latlons(self): + """Get coordinates.""" return self._latlons def __getitem__(self, item): + """Get item.""" return self.attrs[item] def valid_key(self, key): + """Validate key.""" return True @@ -51,6 +58,7 @@ class FakeGRIB(object): """Fake GRIB file returned by pygrib.open.""" def __init__(self, messages=None, proj_params=None, latlons=None): + """Init the grib file.""" super(FakeGRIB, self).__init__() if messages is not None: self._messages = messages @@ -129,27 +137,33 @@ def __init__(self, messages=None, proj_params=None, latlons=None): self.messages = len(self._messages) def message(self, msg_num): + """Get a message.""" return self._messages[msg_num - 1] def seek(self, loc): + """Seek.""" return def __iter__(self): + """Iterate.""" return iter(self._messages) def __enter__(self): + """Enter.""" return self def __exit__(self, exc_type, exc_val, exc_tb): + """Exit.""" pass class TestGRIBReader(unittest.TestCase): - """Test GRIB Reader""" + """Test GRIB Reader.""" + yaml_file = "grib.yaml" def setUp(self): - """Wrap pygrib to read fake data""" + 
"""Wrap pygrib to read fake data.""" from satpy.config import config_search_paths self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file)) @@ -195,19 +209,18 @@ def test_file_pattern(self): @mock.patch('satpy.readers.grib.pygrib') def test_load_all(self, pg): - """Test loading all test datasets""" + """Test loading all test datasets.""" pg.open.return_value = FakeGRIB() from satpy.readers import load_reader - from satpy import DatasetID r = load_reader(self.reader_configs) loadables = r.select_files_from_pathnames([ 'gfs.t18z.sfluxgrbf106.grib2', ]) r.create_filehandlers(loadables) datasets = r.load([ - DatasetID(name='t', level=100), - DatasetID(name='t', level=200), - DatasetID(name='t', level=300)]) + DataQuery(name='t', level=100, modifiers=tuple()), + DataQuery(name='t', level=200, modifiers=tuple()), + DataQuery(name='t', level=300, modifiers=tuple())]) self.assertEqual(len(datasets), 3) for v in datasets.values(): self.assertEqual(v.attrs['units'], 'K') @@ -215,7 +228,7 @@ def test_load_all(self, pg): @mock.patch('satpy.readers.grib.pygrib') def test_load_all_lcc(self, pg): - """Test loading all test datasets with lcc projections""" + """Test loading all test datasets with lcc projections.""" lons = np.array([ [12.19, 0, 0, 0, 14.34208538], [0, 0, 0, 0, 0], @@ -235,16 +248,15 @@ def test_load_all_lcc(self, pg): 'lat_1': 25.0, 'lat_2': 25.0}, latlons=(lats, lons)) from satpy.readers import load_reader - from satpy import DatasetID r = load_reader(self.reader_configs) loadables = r.select_files_from_pathnames([ 'gfs.t18z.sfluxgrbf106.grib2', ]) r.create_filehandlers(loadables) datasets = r.load([ - DatasetID(name='t', level=100), - DatasetID(name='t', level=200), - DatasetID(name='t', level=300)]) + DataQuery(name='t', level=100, modifiers=tuple()), + DataQuery(name='t', level=200, modifiers=tuple()), + DataQuery(name='t', level=300, modifiers=tuple())]) self.assertEqual(len(datasets), 3) for v in datasets.values(): self.assertEqual(v.attrs['units'], 'K') diff --git a/satpy/tests/reader_tests/test_hsaf_grib.py b/satpy/tests/reader_tests/test_hsaf_grib.py index 5f2120e8be..c0be432ef1 100644 --- a/satpy/tests/reader_tests/test_hsaf_grib.py +++ b/satpy/tests/reader_tests/test_hsaf_grib.py @@ -18,15 +18,20 @@ """Module for testing the satpy.readers.grib module.""" import sys -import numpy as np import unittest -from unittest import mock from datetime import datetime +from unittest import mock + +import numpy as np + +from satpy.tests.utils import make_dataid class FakeMessage(object): """Fake message returned by pygrib.open().message(x).""" + def __init__(self, values, proj_params=None, latlons=None, **attrs): + """Init the fake message.""" super(FakeMessage, self).__init__() self.attrs = attrs self.values = values @@ -37,18 +42,23 @@ def __init__(self, values, proj_params=None, latlons=None, **attrs): self._latlons = latlons def latlons(self): + """Get the latlons.""" return self._latlons def __getitem__(self, item): + """Get item.""" return self.attrs[item] def valid_key(self, key): + """Check if key is valid.""" return True class FakeGRIB(object): """Fake GRIB file returned by pygrib.open.""" + def __init__(self, messages=None, proj_params=None, latlons=None): + """Init the fake grib file.""" super(FakeGRIB, self).__init__() if messages is not None: self._messages = messages @@ -82,26 +92,31 @@ def __init__(self, messages=None, proj_params=None, latlons=None): self.messages = len(self._messages) def message(self, msg_num): + """Fake message.""" return 
self._messages[msg_num - 1] def seek(self, loc): + """Fake seek.""" return def __iter__(self): + """Iterate over messages.""" return iter(self._messages) def __enter__(self): + """Enter the context.""" return self def __exit__(self, exc_type, exc_val, exc_tb): + """Exit the context.""" pass class TestHSAFFileHandler(unittest.TestCase): - """Test HSAF Reader""" + """Test HSAF Reader.""" def setUp(self): - """Wrap pygrib to read fake data""" + """Wrap pygrib to read fake data.""" try: import pygrib except ImportError: @@ -115,10 +130,7 @@ def tearDown(self): @mock.patch('satpy.readers.hsaf_grib.pygrib.open', return_value=FakeGRIB()) def test_init(self, pg): - """ - Test the init function, ensure that the correct dates and metadata - are returned - """ + """Test the init function, ensure that the correct dates and metadata are returned.""" pg.open.return_value = FakeGRIB() correct_dt = datetime(2019, 6, 3, 16, 45, 0) from satpy.readers.hsaf_grib import HSAFFileHandler @@ -130,36 +142,30 @@ def test_init(self, pg): @mock.patch('satpy.readers.hsaf_grib.pygrib.open', return_value=FakeGRIB()) def test_get_area_def(self, pg): - """ - Test the area definition setup, checks the size and extent - """ + """Test the area definition setup, checks the size and extent.""" pg.open.return_value = FakeGRIB() from satpy.readers.hsaf_grib import HSAFFileHandler fh = HSAFFileHandler('filename', mock.MagicMock(), mock.MagicMock()) area_def = HSAFFileHandler.get_area_def(fh, 'H03B') - self.assertEqual(area_def.x_size, 3712) + self.assertEqual(area_def.width, 3712) self.assertAlmostEqual(area_def.area_extent[0], -5569209.3026, places=3) self.assertAlmostEqual(area_def.area_extent[3], 5587721.9097, places=3) @mock.patch('satpy.readers.hsaf_grib.pygrib.open', return_value=FakeGRIB()) def test_get_dataset(self, pg): - """ - Test reading the actual datasets from a grib file - """ + """Test reading the actual datasets from a grib file.""" pg.open.return_value = FakeGRIB() from satpy.readers.hsaf_grib import HSAFFileHandler # Instantaneous precipitation fh = HSAFFileHandler('filename', mock.MagicMock(), mock.MagicMock()) fh.filename = "H03B" - ds_id = mock.Mock() - ds_id.name = 'H03B' + ds_id = make_dataid(name='H03B') data = fh.get_dataset(ds_id, mock.Mock()) np.testing.assert_array_equal(data.values, np.arange(25.).reshape((5, 5))) # Accumulated precipitation fh = HSAFFileHandler('filename', mock.MagicMock(), mock.MagicMock()) fh.filename = "H05B" - ds_id = mock.Mock() - ds_id.name = 'H05B' + ds_id = make_dataid(name='H05B') data = fh.get_dataset(ds_id, mock.Mock()) np.testing.assert_array_equal(data.values, np.arange(25.).reshape((5, 5))) diff --git a/satpy/tests/reader_tests/test_hy2_scat_l2b_h5.py b/satpy/tests/reader_tests/test_hy2_scat_l2b_h5.py index af6e81fada..1755a7c9df 100644 --- a/satpy/tests/reader_tests/test_hy2_scat_l2b_h5.py +++ b/satpy/tests/reader_tests/test_hy2_scat_l2b_h5.py @@ -15,8 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""Module for testing the satpy.readers.hy2_scat_l2b_h5 module. 
-""" +"""Module for testing the satpy.readers.hy2_scat_l2b_h5 module.""" import os import numpy as np @@ -38,7 +37,7 @@ class FakeHDF5FileHandler2(FakeHDF5FileHandler): - """Swap-in HDF5 File Handler""" + """Swap-in HDF5 File Handler.""" def _get_geo_data(self, num_rows, num_cols): geo = { @@ -327,6 +326,7 @@ def get_test_content(self, filename, filename_info, filetype_info): class TestHY2SCATL2BH5Reader(unittest.TestCase): """Test HY2 Scatterometer L2B H5 Reader.""" + yaml_file = "hy2_scat_l2b_h5.yaml" def setUp(self): diff --git a/satpy/tests/reader_tests/test_iasi_l2.py b/satpy/tests/reader_tests/test_iasi_l2.py index 24f643e56a..fc88f7168b 100644 --- a/satpy/tests/reader_tests/test_iasi_l2.py +++ b/satpy/tests/reader_tests/test_iasi_l2.py @@ -15,7 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . -"""Unit tests for IASI L2 reader""" +"""Unit tests for IASI L2 reader.""" import os import unittest @@ -113,7 +113,7 @@ def save_test_data(path): - """Save the test to the indicated directory""" + """Save the test to the indicated directory.""" import h5py with h5py.File(os.path.join(path, FNAME), 'w') as fid: # Create groups @@ -153,7 +153,7 @@ def setUp(self): self.reader = IASIL2HDF5(self.fname, self.fname_info, self.ftype_info) def tearDown(self): - """Remove the temporary directory created for a test""" + """Remove the temporary directory created for a test.""" try: import shutil shutil.rmtree(self.base_dir, ignore_errors=True) @@ -161,7 +161,7 @@ def tearDown(self): pass def test_scene(self): - """Test scene creation""" + """Test scene creation.""" from satpy import Scene fname = os.path.join(self.base_dir, FNAME) scn = Scene(reader='iasi_l2', filenames=[fname]) @@ -171,14 +171,14 @@ def test_scene(self): self.assertTrue('iasi' in scn.attrs['sensor']) def test_scene_load_available_datasets(self): - """Test that all datasets are available""" + """Test that all datasets are available.""" from satpy import Scene fname = os.path.join(self.base_dir, FNAME) scn = Scene(reader='iasi_l2', filenames=[fname]) scn.load(scn.available_dataset_names()) def test_scene_load_pressure(self): - """Test loading pressure data""" + """Test loading pressure data.""" from satpy import Scene fname = os.path.join(self.base_dir, FNAME) scn = Scene(reader='iasi_l2', filenames=[fname]) @@ -187,7 +187,7 @@ def test_scene_load_pressure(self): self.check_pressure(pres, scn.attrs) def test_scene_load_emissivity(self): - """Test loading emissivity data""" + """Test loading emissivity data.""" from satpy import Scene fname = os.path.join(self.base_dir, FNAME) scn = Scene(reader='iasi_l2', filenames=[fname]) @@ -196,7 +196,7 @@ def test_scene_load_emissivity(self): self.check_emissivity(emis) def test_scene_load_sensing_times(self): - """Test loading sensing times""" + """Test loading sensing times.""" from satpy import Scene fname = os.path.join(self.base_dir, FNAME) scn = Scene(reader='iasi_l2', filenames=[fname]) @@ -205,7 +205,7 @@ def test_scene_load_sensing_times(self): self.check_sensing_times(times) def test_init(self): - """Test reader initialization""" + """Test reader initialization.""" self.assertEqual(self.reader.filename, self.fname) self.assertEqual(self.reader.finfo, self.fname_info) self.assertTrue(self.reader.lons is None) @@ -214,29 +214,32 @@ def test_init(self): self.assertEqual(self.reader.mda['sensor'], 'iasi') def test_time_properties(self): - """Test time properties""" + """Test time properties.""" import datetime as dt 
self.assertTrue(isinstance(self.reader.start_time, dt.datetime)) self.assertTrue(isinstance(self.reader.end_time, dt.datetime)) def test_get_dataset(self): - """Test get_dataset() for different datasets""" - from satpy import DatasetID + """Test get_dataset() for different datasets.""" + from satpy.tests.utils import make_dataid info = {'eggs': 'spam'} - key = DatasetID(name='pressure') + key = make_dataid(name='pressure') data = self.reader.get_dataset(key, info).compute() self.check_pressure(data) self.assertTrue('eggs' in data.attrs) self.assertEqual(data.attrs['eggs'], 'spam') - key = DatasetID(name='emissivity') + key = make_dataid(name='emissivity') data = self.reader.get_dataset(key, info).compute() self.check_emissivity(data) - key = DatasetID(name='sensing_time') + key = make_dataid(name='sensing_time') data = self.reader.get_dataset(key, info).compute() self.assertEqual(data.shape, (NUM_SCANLINES, SCAN_WIDTH)) def check_pressure(self, pres, attrs=None): - """Helper method for testing reading pressure dataset""" + """Test reading pressure dataset. + + Helper function. + """ self.assertTrue(np.all(pres == 0.0)) self.assertEqual(pres.x.size, SCAN_WIDTH) self.assertEqual(pres.y.size, NUM_SCANLINES) @@ -248,14 +251,20 @@ def check_pressure(self, pres, attrs=None): self.assertTrue('units' in pres.attrs) def check_emissivity(self, emis): - """Helper method for testing reading emissivity dataset.""" + """Test reading emissivity dataset. + + Helper function. + """ self.assertTrue(np.all(emis == 0.0)) self.assertEqual(emis.x.size, SCAN_WIDTH) self.assertEqual(emis.y.size, NUM_SCANLINES) self.assertTrue('emissivity_wavenumbers' in emis.attrs) def check_sensing_times(self, times): - """Helper method for testing reading sensing times""" + """Test reading sensing times. + + Helper function. 
+ """ # Times should be equal in blocks of four, but not beyond, so # there should be SCAN_WIDTH/4 different values for i in range(int(SCAN_WIDTH / 4)): @@ -263,37 +272,37 @@ def check_sensing_times(self, times): self.assertEqual(np.unique(times[0, :]).size, SCAN_WIDTH / 4) def test_read_dataset(self): - """Test read_dataset() function""" + """Test read_dataset() function.""" import h5py from satpy.readers.iasi_l2 import read_dataset - from satpy import DatasetID + from satpy.tests.utils import make_dataid with h5py.File(self.fname, 'r') as fid: - key = DatasetID(name='pressure') + key = make_dataid(name='pressure') data = read_dataset(fid, key).compute() self.check_pressure(data) - key = DatasetID(name='emissivity') + key = make_dataid(name='emissivity') data = read_dataset(fid, key).compute() self.check_emissivity(data) # This dataset doesn't have any attributes - key = DatasetID(name='ozone_total_column') + key = make_dataid(name='ozone_total_column') data = read_dataset(fid, key).compute() self.assertEqual(len(data.attrs), 0) def test_read_geo(self): - """Test read_geo() function""" + """Test read_geo() function.""" import h5py from satpy.readers.iasi_l2 import read_geo - from satpy import DatasetID + from satpy.tests.utils import make_dataid with h5py.File(self.fname, 'r') as fid: - key = DatasetID(name='sensing_time') + key = make_dataid(name='sensing_time') data = read_geo(fid, key).compute() self.assertEqual(data.shape, (NUM_SCANLINES, SCAN_WIDTH)) - key = DatasetID(name='latitude') + key = make_dataid(name='latitude') data = read_geo(fid, key).compute() self.assertEqual(data.shape, (NUM_SCANLINES, SCAN_WIDTH)) def test_form_datetimes(self): - """Test _form_datetimes() function""" + """Test _form_datetimes() function.""" from satpy.readers.iasi_l2 import _form_datetimes days = TEST_DATA['L1C']['SensingTime_day']['data'] msecs = TEST_DATA['L1C']['SensingTime_msec']['data'] diff --git a/satpy/tests/reader_tests/test_mersi2_l1b.py b/satpy/tests/reader_tests/test_mersi2_l1b.py index 1f88c1e664..cbfabc300b 100644 --- a/satpy/tests/reader_tests/test_mersi2_l1b.py +++ b/satpy/tests/reader_tests/test_mersi2_l1b.py @@ -18,6 +18,7 @@ """Tests for the 'mersi2_l1b' reader.""" import os import unittest +import pytest from unittest import mock import numpy as np @@ -30,6 +31,7 @@ class FakeHDF5FileHandler2(FakeHDF5FileHandler): """Swap-in HDF5 File Handler.""" def make_test_data(self, dims): + """Make test data.""" return xr.DataArray(da.from_array(np.ones([dim for dim in dims], dtype=np.float32) * 10, [dim for dim in dims])) def _get_calibration(self, num_scans, rows_per_scan): @@ -235,6 +237,7 @@ def get_test_content(self, filename, filename_info, filetype_info): class TestMERSI2L1BReader(unittest.TestCase): """Test MERSI2 L1B Reader.""" + yaml_file = "mersi2_l1b.yaml" def setUp(self): @@ -253,7 +256,7 @@ def tearDown(self): def test_fy3d_all_resolutions(self): """Test loading data when all resolutions are available.""" - from satpy import DatasetID + from satpy.tests.utils import make_dataid from satpy.readers import load_reader, get_key filenames = [ 'tf2019071182739.FY3D-X_MERSI_0250M_L1B.HDF', @@ -278,11 +281,11 @@ def test_fy3d_all_resolutions(self): num_results = 2 else: num_results = 3 - ds_id = DatasetID(name=band_name, resolution=250) + ds_id = make_dataid(name=band_name, resolution=250) res = get_key(ds_id, available_datasets, num_results=num_results, best=False) self.assertEqual(num_results, len(res)) - ds_id = DatasetID(name=band_name, resolution=1000) + ds_id = 
make_dataid(name=band_name, resolution=1000) res = get_key(ds_id, available_datasets, num_results=num_results, best=False) self.assertEqual(num_results, len(res)) @@ -316,7 +319,7 @@ def test_fy3d_all_resolutions(self): def test_fy3d_counts_calib(self): """Test loading data at counts calibration.""" - from satpy import DatasetID + from satpy.tests.utils import make_dataid from satpy.readers import load_reader filenames = [ 'tf2019071182739.FY3D-X_MERSI_0250M_L1B.HDF', @@ -333,7 +336,7 @@ def test_fy3d_counts_calib(self): ds_ids = [] for band_name in ['1', '2', '3', '4', '5', '20', '24', '25']: - ds_ids.append(DatasetID(name=band_name, calibration='counts')) + ds_ids.append(make_dataid(name=band_name, calibration='counts')) res = reader.load(ds_ids) self.assertEqual(8, len(res)) self.assertEqual((2 * 40, 2048 * 2), res['1'].shape) @@ -371,7 +374,7 @@ def test_fy3d_counts_calib(self): def test_fy3d_rad_calib(self): """Test loading data at radiance calibration.""" - from satpy import DatasetID + from satpy.tests.utils import make_dataid from satpy.readers import load_reader filenames = [ 'tf2019071182739.FY3D-X_MERSI_0250M_L1B.HDF', @@ -388,7 +391,7 @@ def test_fy3d_rad_calib(self): ds_ids = [] for band_name in ['1', '2', '3', '4', '5']: - ds_ids.append(DatasetID(name=band_name, calibration='radiance')) + ds_ids.append(make_dataid(name=band_name, calibration='radiance')) res = reader.load(ds_ids) self.assertEqual(5, len(res)) self.assertEqual((2 * 40, 2048 * 2), res['1'].shape) @@ -409,7 +412,7 @@ def test_fy3d_rad_calib(self): def test_fy3d_1km_resolutions(self): """Test loading data when only 1km resolutions are available.""" - from satpy import DatasetID + from satpy.tests.utils import make_dataid from satpy.readers import load_reader, get_key filenames = [ 'tf2019071182739.FY3D-X_MERSI_1000M_L1B.HDF', @@ -432,11 +435,11 @@ def test_fy3d_1km_resolutions(self): num_results = 2 else: num_results = 3 - ds_id = DatasetID(name=band_name, resolution=250) - res = get_key(ds_id, available_datasets, - num_results=num_results, best=False) - self.assertEqual(0, len(res)) - ds_id = DatasetID(name=band_name, resolution=1000) + ds_id = make_dataid(name=band_name, resolution=250) + with pytest.raises(KeyError): + res = get_key(ds_id, available_datasets, + num_results=num_results, best=False) + ds_id = make_dataid(name=band_name, resolution=1000) res = get_key(ds_id, available_datasets, num_results=num_results, best=False) self.assertEqual(num_results, len(res)) @@ -470,7 +473,7 @@ def test_fy3d_1km_resolutions(self): def test_fy3d_250_resolutions(self): """Test loading data when only 250m resolutions are available.""" - from satpy import DatasetID + from satpy.tests.utils import make_dataid from satpy.readers import load_reader, get_key filenames = [ 'tf2019071182739.FY3D-X_MERSI_0250M_L1B.HDF', @@ -493,14 +496,14 @@ def test_fy3d_250_resolutions(self): num_results = 2 else: num_results = 3 - ds_id = DatasetID(name=band_name, resolution=250) + ds_id = make_dataid(name=band_name, resolution=250) res = get_key(ds_id, available_datasets, num_results=num_results, best=False) self.assertEqual(num_results, len(res)) - ds_id = DatasetID(name=band_name, resolution=1000) - res = get_key(ds_id, available_datasets, - num_results=num_results, best=False) - self.assertEqual(0, len(res)) + ds_id = make_dataid(name=band_name, resolution=1000) + with pytest.raises(KeyError): + res = get_key(ds_id, available_datasets, + num_results=num_results, best=False) res = reader.load(['1', '2', '3', '4', '5', '20', '24', '25']) 
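The MERSI-2 hunks above follow the same pattern as the AGRI ones earlier in the patch: where the old tests asserted that `get_key` returned an empty result (`self.assertEqual(0, len(res))`), the updated tests expect a `KeyError` as soon as no dataset matches the requested resolution. A condensed sketch of the new expectation, with an empty list standing in for `reader.available_dataset_ids`::

    import pytest
    from satpy.readers import get_key
    from satpy.tests.utils import make_dataid

    available_datasets = []  # in the real tests: reader.available_dataset_ids

    with pytest.raises(KeyError):
        get_key(make_dataid(name='1', resolution=250),
                available_datasets, num_results=0, best=False)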
self.assertEqual(6, len(res)) diff --git a/satpy/tests/reader_tests/test_modis_l2.py b/satpy/tests/reader_tests/test_modis_l2.py index 7dc922f6ec..6a90586169 100644 --- a/satpy/tests/reader_tests/test_modis_l2.py +++ b/satpy/tests/reader_tests/test_modis_l2.py @@ -161,7 +161,7 @@ def test_scene_available_datasets(self): def test_load_longitude_latitude(self): """Test that longitude and latitude datasets are loaded correctly.""" - from satpy import DatasetID + from satpy.tests.utils import make_dataid def test_func(dname, x, y): if dname == 'longitude': @@ -176,46 +176,46 @@ def test_func(dname, x, y): for dataset_name in ['longitude', 'latitude']: # Default resolution should be the interpolated 1km scene.load([dataset_name]) - longitude_1km_id = DatasetID(name=dataset_name, resolution=1000) + longitude_1km_id = make_dataid(name=dataset_name, resolution=1000) longitude_1km = scene[longitude_1km_id] self.assertEqual(longitude_1km.shape, (5*SCAN_WIDTH, 5*SCAN_LEN+4)) test_func(dataset_name, longitude_1km.values, 0) # Specify original 5km scale scene.load([dataset_name], resolution=5000) - longitude_5km_id = DatasetID(name=dataset_name, resolution=5000) + longitude_5km_id = make_dataid(name=dataset_name, resolution=5000) longitude_5km = scene[longitude_5km_id] self.assertEqual(longitude_5km.shape, TEST_DATA[dataset_name.capitalize()]['data'].shape) test_func(dataset_name, longitude_5km.values, 0) def test_load_quality_assurance(self): """Test loading quality assurance.""" - from satpy import DatasetID + from satpy.tests.utils import make_dataid scene = Scene(reader='modis_l2', filenames=[self.file_name]) dataset_name = 'quality_assurance' scene.load([dataset_name]) - quality_assurance_id = DatasetID(name=dataset_name, resolution=1000) + quality_assurance_id = make_dataid(name=dataset_name, resolution=1000) self.assertIn(quality_assurance_id, scene.datasets) quality_assurance = scene[quality_assurance_id] self.assertEqual(quality_assurance.shape, (5*SCAN_WIDTH, 5*SCAN_LEN+4)) def test_load_1000m_cloud_mask_dataset(self): """Test loading 1000m cloud mask.""" - from satpy import DatasetID + from satpy.tests.utils import make_dataid scene = Scene(reader='modis_l2', filenames=[self.file_name]) dataset_name = 'cloud_mask' scene.load([dataset_name], resolution=1000) - cloud_mask_id = DatasetID(name=dataset_name, resolution=1000) + cloud_mask_id = make_dataid(name=dataset_name, resolution=1000) self.assertIn(cloud_mask_id, scene.datasets) cloud_mask = scene[cloud_mask_id] self.assertEqual(cloud_mask.shape, (5*SCAN_WIDTH, 5*SCAN_LEN+4)) def test_load_250m_cloud_mask_dataset(self): """Test loading 250m cloud mask.""" - from satpy import DatasetID + from satpy.tests.utils import make_dataid scene = Scene(reader='modis_l2', filenames=[self.file_name]) dataset_name = 'cloud_mask' scene.load([dataset_name], resolution=250) - cloud_mask_id = DatasetID(name=dataset_name, resolution=250) + cloud_mask_id = make_dataid(name=dataset_name, resolution=250) self.assertIn(cloud_mask_id, scene.datasets) cloud_mask = scene[cloud_mask_id] self.assertEqual(cloud_mask.shape, (4*5*SCAN_WIDTH, 4*(5*SCAN_LEN+4))) diff --git a/satpy/tests/reader_tests/test_nc_slstr.py b/satpy/tests/reader_tests/test_nc_slstr.py index 5e8cac7c5c..d56e2daa8d 100644 --- a/satpy/tests/reader_tests/test_nc_slstr.py +++ b/satpy/tests/reader_tests/test_nc_slstr.py @@ -18,6 +18,49 @@ """Module for testing the satpy.readers.nc_slstr module.""" import unittest import unittest.mock as mock +from satpy.dataset import WavelengthRange, ModifierTuple, 
DataID + +local_id_keys_config = {'name': { + 'required': True, +}, + 'wavelength': { + 'type': WavelengthRange, +}, + 'resolution': None, + 'calibration': { + 'enum': [ + 'reflectance', + 'brightness_temperature', + 'radiance', + 'counts' + ] +}, + 'stripe': { + 'enum': [ + 'a', + 'b', + 'c', + 'i', + 'f', + ] + }, + 'view': { + 'enum': [ + 'nadir', + 'oblique', + ] + }, + 'modifiers': { + 'required': True, + 'default': ModifierTuple(), + 'type': ModifierTuple, +}, +} + + +def make_dataid(**items): + """Make a data id.""" + return DataID(local_id_keys_config, **items) class TestSLSTRReader(unittest.TestCase): @@ -27,27 +70,30 @@ class TestSLSTRReader(unittest.TestCase): def test_instantiate(self, mocked_dataset): """Test initialization of file handlers.""" from satpy.readers.slstr_l1b import NCSLSTR1B, NCSLSTRGeo, NCSLSTRAngles, NCSLSTRFlag - from satpy import DatasetID - - ds_id = DatasetID(name='foo') - filename_info = {'mission_id': 'S3A', 'dataset_name': 'foo', 'start_time': 0, 'end_time': 0} + ds_id = make_dataid(name='foo', calibration='radiance', stripe='a', view='nadir') + filename_info = {'mission_id': 'S3A', 'dataset_name': 'foo', 'start_time': 0, 'end_time': 0, + 'stripe': 'a', 'view': 'n'} test = NCSLSTR1B('somedir/S1_radiance_an.nc', filename_info, 'c') - assert(test.view == 'n') + assert(test.view == 'nadir') assert(test.stripe == 'a') - test.get_dataset(ds_id, filename_info) + test.get_dataset(ds_id, dict(filename_info, **{'file_key': 'foo'})) mocked_dataset.assert_called() mocked_dataset.reset_mock() + filename_info = {'mission_id': 'S3A', 'dataset_name': 'foo', 'start_time': 0, 'end_time': 0, + 'stripe': 'c', 'view': 'o'} test = NCSLSTR1B('somedir/S1_radiance_co.nc', filename_info, 'c') - assert(test.view == 'o') + assert(test.view == 'oblique') assert(test.stripe == 'c') - test.get_dataset(ds_id, filename_info) + test.get_dataset(ds_id, dict(filename_info, **{'file_key': 'foo'})) mocked_dataset.assert_called() mocked_dataset.reset_mock() + filename_info = {'mission_id': 'S3A', 'dataset_name': 'foo', 'start_time': 0, 'end_time': 0, + 'stripe': 'a', 'view': 'n'} test = NCSLSTRGeo('somedir/S1_radiance_an.nc', filename_info, 'c') - test.get_dataset(ds_id, filename_info) + test.get_dataset(ds_id, dict(filename_info, **{'file_key': 'foo'})) mocked_dataset.assert_called() mocked_dataset.reset_mock() @@ -58,7 +104,7 @@ def test_instantiate(self, mocked_dataset): mocked_dataset.reset_mock() test = NCSLSTRFlag('somedir/S1_radiance_an.nc', filename_info, 'c') - assert(test.view == 'n') + assert(test.view == 'nadir') assert(test.stripe == 'a') mocked_dataset.assert_called() mocked_dataset.reset_mock() diff --git a/satpy/tests/reader_tests/test_nwcsaf_msg.py b/satpy/tests/reader_tests/test_nwcsaf_msg.py index f461991be6..fed0715d1d 100644 --- a/satpy/tests/reader_tests/test_nwcsaf_msg.py +++ b/satpy/tests/reader_tests/test_nwcsaf_msg.py @@ -482,11 +482,11 @@ def fill_h5(root, stuff): def test_get_area_def(self): """Get the area definition.""" from satpy.readers.nwcsaf_msg2013_hdf5 import Hdf5NWCSAF - from satpy import DatasetID + from satpy.tests.utils import make_dataid filename_info = {} filetype_info = {} - dsid = DatasetID(name="ct") + dsid = make_dataid(name="ct") test = Hdf5NWCSAF(self.filename_ct, filename_info, filetype_info) area_def = test.get_area_def(dsid) @@ -509,11 +509,11 @@ def test_get_area_def(self): def test_get_dataset(self): """Retrieve datasets from a NWCSAF msgv2013 hdf5 file.""" from satpy.readers.nwcsaf_msg2013_hdf5 import Hdf5NWCSAF - from satpy import 
DatasetID + from satpy.tests.utils import make_dataid filename_info = {} filetype_info = {} - dsid = DatasetID(name="ct") + dsid = make_dataid(name="ct") test = Hdf5NWCSAF(self.filename_ct, filename_info, filetype_info) ds = test.get_dataset(dsid, {"file_key": "CT"}) self.assertEqual(ds.shape, (1856, 3712)) @@ -522,7 +522,7 @@ def test_get_dataset(self): filename_info = {} filetype_info = {} - dsid = DatasetID(name="ctth_alti") + dsid = make_dataid(name="ctth_alti") test = Hdf5NWCSAF(self.filename_ctth, filename_info, filetype_info) ds = test.get_dataset(dsid, {"file_key": "CTTH_HEIGHT"}) self.assertEqual(ds.shape, (1856, 3712)) @@ -531,7 +531,7 @@ def test_get_dataset(self): filename_info = {} filetype_info = {} - dsid = DatasetID(name="ctth_pres") + dsid = make_dataid(name="ctth_pres") test = Hdf5NWCSAF(self.filename_ctth, filename_info, filetype_info) ds = test.get_dataset(dsid, {"file_key": "CTTH_PRESS"}) self.assertEqual(ds.shape, (1856, 3712)) @@ -540,7 +540,7 @@ def test_get_dataset(self): filename_info = {} filetype_info = {} - dsid = DatasetID(name="ctth_tempe") + dsid = make_dataid(name="ctth_tempe") test = Hdf5NWCSAF(self.filename_ctth, filename_info, filetype_info) ds = test.get_dataset(dsid, {"file_key": "CTTH_TEMPER"}) self.assertEqual(ds.shape, (1856, 3712)) diff --git a/satpy/tests/reader_tests/test_olci_nc.py b/satpy/tests/reader_tests/test_olci_nc.py index 29c5f445b1..0597d5c82a 100644 --- a/satpy/tests/reader_tests/test_olci_nc.py +++ b/satpy/tests/reader_tests/test_olci_nc.py @@ -28,7 +28,7 @@ def test_instantiate(self, mocked_dataset): """Test initialization of file handlers.""" from satpy.readers.olci_nc import (NCOLCIBase, NCOLCICal, NCOLCIGeo, NCOLCIChannelBase, NCOLCI1B, NCOLCI2) - from satpy import DatasetID + from satpy.tests.utils import make_dataid import xarray as xr cal_data = xr.Dataset( @@ -39,8 +39,8 @@ def test_instantiate(self, mocked_dataset): {'bands': [0, 1, 2], }, ) - ds_id = DatasetID(name='Oa01', calibration='reflectance') - ds_id2 = DatasetID(name='wsqf', calibration='reflectance') + ds_id = make_dataid(name='Oa01', calibration='reflectance') + ds_id2 = make_dataid(name='wsqf', calibration='reflectance') filename_info = {'mission_id': 'S3A', 'dataset_name': 'Oa01', 'start_time': 0, 'end_time': 0} test = NCOLCIBase('somedir/somefile.nc', filename_info, 'c') @@ -80,14 +80,14 @@ def test_instantiate(self, mocked_dataset): def test_get_dataset(self, mocked_dataset): """Test reading datasets.""" from satpy.readers.olci_nc import NCOLCI2 - from satpy import DatasetID + from satpy.tests.utils import make_dataid import numpy as np import xarray as xr mocked_dataset.return_value = xr.Dataset({'mask': (['rows', 'columns'], np.array([1 << x for x in range(30)]).reshape(5, 6))}, coords={'rows': np.arange(5), 'columns': np.arange(6)}) - ds_id = DatasetID(name='mask') + ds_id = make_dataid(name='mask') filename_info = {'mission_id': 'S3A', 'dataset_name': 'mask', 'start_time': 0, 'end_time': 0} test = NCOLCI2('somedir/somefile.nc', filename_info, 'c') res = test.get_dataset(ds_id, {'nc_key': 'mask'}) @@ -97,7 +97,7 @@ def test_get_dataset(self, mocked_dataset): def test_olci_angles(self, mocked_dataset): """Test reading datasets.""" from satpy.readers.olci_nc import NCOLCIAngles - from satpy import DatasetID + from satpy.tests.utils import make_dataid import numpy as np import xarray as xr attr_dict = { @@ -117,8 +117,8 @@ def test_olci_angles(self, mocked_dataset): attrs=attr_dict) filename_info = {'mission_id': 'S3A', 'dataset_name': 'Oa01', 'start_time': 0, 
'end_time': 0} - ds_id = DatasetID(name='solar_azimuth_angle') - ds_id2 = DatasetID(name='satellite_zenith_angle') + ds_id = make_dataid(name='solar_azimuth_angle') + ds_id2 = make_dataid(name='satellite_zenith_angle') test = NCOLCIAngles('somedir/somefile.nc', filename_info, 'c') test.get_dataset(ds_id, filename_info) test.get_dataset(ds_id2, filename_info) @@ -129,7 +129,7 @@ def test_olci_angles(self, mocked_dataset): def test_olci_meteo(self, mocked_dataset): """Test reading datasets.""" from satpy.readers.olci_nc import NCOLCIMeteo - from satpy import DatasetID + from satpy.tests.utils import make_dataid import numpy as np import xarray as xr attr_dict = { @@ -150,8 +150,8 @@ def test_olci_meteo(self, mocked_dataset): attrs=attr_dict) filename_info = {'mission_id': 'S3A', 'dataset_name': 'humidity', 'start_time': 0, 'end_time': 0} - ds_id = DatasetID(name='humidity') - ds_id2 = DatasetID(name='total_ozone') + ds_id = make_dataid(name='humidity') + ds_id2 = make_dataid(name='total_ozone') test = NCOLCIMeteo('somedir/somefile.nc', filename_info, 'c') test.get_dataset(ds_id, filename_info) test.get_dataset(ds_id2, filename_info) diff --git a/satpy/tests/reader_tests/test_safe_sar_l2_ocn.py b/satpy/tests/reader_tests/test_safe_sar_l2_ocn.py index e4039b4d28..d50c267fc5 100644 --- a/satpy/tests/reader_tests/test_safe_sar_l2_ocn.py +++ b/satpy/tests/reader_tests/test_safe_sar_l2_ocn.py @@ -20,15 +20,17 @@ import unittest.mock as mock import numpy as np import xarray as xr -from satpy import DatasetID +from satpy.tests.utils import make_dataid class TestSAFENC(unittest.TestCase): """Test various SAFE SAR L2 OCN file handlers.""" + @mock.patch('satpy.readers.safe_sar_l2_ocn.xr') @mock.patch.multiple('satpy.readers.safe_sar_l2_ocn.SAFENC', __abstractmethods__=set()) def setUp(self, xr_): + """Set up the tests.""" from satpy.readers.safe_sar_l2_ocn import SAFENC self.channels = ['owiWindSpeed', 'owiLon', 'owiLat', 'owiHs', 'owiNrcs', 'foo', @@ -63,36 +65,18 @@ def setUp(self, xr_): filetype_info={}) def test_init(self): - """Tests reader initialization""" + """Test reader initialization.""" self.assertEqual(self.reader.start_time, 0) self.assertEqual(self.reader.end_time, 0) self.assertEqual(self.reader.fstart_time, 0) self.assertEqual(self.reader.fend_time, 0) def test_get_dataset(self): + """Test getting a dataset.""" for ch in self.channels: dt = self.reader.get_dataset( - key=DatasetID(name=ch), info={}) + key=make_dataid(name=ch), info={}) # ... 
this only compares the valid (unmasked) elements self.assertTrue(np.all(self.nc[ch] == dt.to_masked_array()), msg='get_dataset() returns invalid data for ' 'dataset {}'.format(ch)) - -# @mock.patch('xarray.open_dataset') -# def test_init(self, mocked_dataset): -# """Test basic init with no extra parameters.""" -# from satpy.readers.safe_sar_l2_ocn import SAFENC -# from satpy import DatasetID -# -# print(mocked_dataset) -# ds_id = DatasetID(name='foo') -# filename_info = {'mission_id': 'S3A', 'product_type': 'foo', -# 'start_time': 0, 'end_time': 0, -# 'fstart_time': 0, 'fend_time': 0, -# 'polarization': 'vv'} -# -# test = SAFENC('S1A_IW_OCN__2SDV_20190228T075834_20190228T075849_026127_02EA43_8846.SAFE/measurement/' -# 's1a-iw-ocn-vv-20190228t075741-20190228t075800-026127-02EA43-001.nc', filename_info, 'c') -# print(test) -# mocked_dataset.assert_called() -# test.get_dataset(ds_id, filename_info) diff --git a/satpy/tests/reader_tests/test_scmi.py b/satpy/tests/reader_tests/test_scmi.py index d061945ff2..684e15248b 100644 --- a/satpy/tests/reader_tests/test_scmi.py +++ b/satpy/tests/reader_tests/test_scmi.py @@ -24,7 +24,10 @@ class FakeDataset(object): + """Fake dataset.""" + def __init__(self, info, attrs, dims=None): + """Init the dataset.""" for var_name, var_data in list(info.items()): if isinstance(var_data, np.ndarray): info[var_name] = xr.DataArray(var_data) @@ -33,15 +36,19 @@ def __init__(self, info, attrs, dims=None): self.dims = dims or {} def __getitem__(self, key): + """Get item.""" return self.info.get(key, self.dims.get(key)) def __contains__(self, key): + """Check contains.""" return key in self.info or key in self.dims def rename(self, *args, **kwargs): + """Rename the dataset.""" return self def close(self): + """Close the dataset.""" return @@ -50,7 +57,7 @@ class TestSCMIFileHandler(unittest.TestCase): @mock.patch('satpy.readers.scmi.xr') def setUp(self, xr_): - """Setup for test.""" + """Set up for test.""" from satpy.readers.scmi import SCMIFileHandler rad_data = (np.arange(10.).reshape((2, 5)) + 1.) rad_data = (rad_data + 1.) 
/ 0.5 @@ -95,19 +102,19 @@ def setUp(self, xr_): def test_basic_attributes(self): """Test getting basic file attributes.""" from datetime import datetime - from satpy import DatasetID + from satpy.tests.utils import make_dataid self.assertEqual(self.reader.start_time, datetime(2017, 7, 29, 12, 0, 0, 0)) self.assertEqual(self.reader.end_time, datetime(2017, 7, 29, 12, 0, 0, 0)) - self.assertEqual(self.reader.get_shape(DatasetID(name='C05'), {}), + self.assertEqual(self.reader.get_shape(make_dataid(name='C05'), {}), (2, 5)) def test_data_load(self): """Test data loading.""" - from satpy import DatasetID + from satpy.tests.utils import make_dataid res = self.reader.get_dataset( - DatasetID(name='C05', calibration='reflectance'), {}) + make_dataid(name='C05', calibration='reflectance'), {}) np.testing.assert_allclose(res.data, self.expected_rad, equal_nan=True) self.assertNotIn('scale_factor', res.attrs) diff --git a/satpy/tests/reader_tests/test_seviri_l1b_hrit.py b/satpy/tests/reader_tests/test_seviri_l1b_hrit.py index e993efa45e..38138a4254 100644 --- a/satpy/tests/reader_tests/test_seviri_l1b_hrit.py +++ b/satpy/tests/reader_tests/test_seviri_l1b_hrit.py @@ -27,7 +27,7 @@ from satpy.readers.seviri_l1b_hrit import (HRITMSGFileHandler, HRITMSGPrologueFileHandler, HRITMSGEpilogueFileHandler, NoValidOrbitParams, pad_data) from satpy.readers.seviri_base import CHANNEL_NAMES, VIS_CHANNELS -from satpy.dataset import DatasetID +from satpy.tests.utils import make_dataid def new_get_hd(instance, hdr_info): @@ -135,8 +135,7 @@ def test_read_hrv_band(self, memmap): @mock.patch('satpy.readers.seviri_l1b_hrit.HRITMSGFileHandler.calibrate') def test_get_dataset(self, calibrate, parent_get_dataset, _get_timestamps): """Test getting the hrv dataset.""" - key = mock.MagicMock(calibration='calibration') - key.name = 'HRV' + key = make_dataid(name='HRV', calibration='reflectance') info = {'units': 'units', 'wavelength': 'wavelength', 'standard_name': 'standard_name'} timestamps = np.arange(0, 464, dtype='datetime64[ns]') @@ -148,7 +147,7 @@ def test_get_dataset(self, calibrate, parent_get_dataset, _get_timestamps): # Test method calls parent_get_dataset.assert_called_with(key, info) - calibrate.assert_called_with(parent_get_dataset(), key.calibration) + calibrate.assert_called_with(parent_get_dataset(), key['calibration']) # Test attributes (just check if raw metadata is there and then remove it before checking the remaining # attributes) @@ -182,7 +181,7 @@ def test_get_dataset(self, calibrate, parent_get_dataset, _get_timestamps): @mock.patch('satpy.readers.seviri_l1b_hrit.HRITMSGFileHandler.calibrate') def test_get_dataset_non_fill(self, calibrate, parent_get_dataset, _get_timestamps): """Test getting a non-filled hrv dataset.""" - key = mock.MagicMock(calibration='calibration') + key = make_dataid(name='HRV', calibration='reflectance') key.name = 'HRV' info = {'units': 'units', 'wavelength': 'wavelength', 'standard_name': 'standard_name'} timestamps = np.arange(0, 464, dtype='datetime64[ns]') @@ -195,7 +194,7 @@ def test_get_dataset_non_fill(self, calibrate, parent_get_dataset, _get_timestam # Test method calls parent_get_dataset.assert_called_with(key, info) - calibrate.assert_called_with(parent_get_dataset(), key.calibration) + calibrate.assert_called_with(parent_get_dataset(), key['calibration']) # Test attributes (just check if raw metadata is there and then remove it before checking the remaining # attributes) @@ -241,7 +240,7 @@ def test_pad_data(self): def test_get_area_def(self): """Test getting 
the area def.""" from pyresample.utils import proj4_radius_parameters - area = self.reader.get_area_def(DatasetID('HRV')) + area = self.reader.get_area_def(make_dataid(name='HRV')) self.assertEqual(area.area_extent, (-45561979844414.07, -3720765401003.719, 45602912357076.38, 77771774058.38356)) proj_dict = area.proj_dict @@ -253,7 +252,7 @@ def test_get_area_def(self): self.assertEqual(proj_dict['proj'], 'geos') self.assertEqual(proj_dict['units'], 'm') self.reader.fill_hrv = False - area = self.reader.get_area_def(DatasetID('HRV')) + area = self.reader.get_area_def(make_dataid(name='HRV')) self.assertEqual(area.defs[0].area_extent, (-22017598561055.01, -2926674655354.9604, 23564847539690.22, 77771774058.38356)) self.assertEqual(area.defs[1].area_extent, @@ -326,7 +325,7 @@ def setUp(self, fromfile): def test_get_area_def(self): """Test getting the area def.""" from pyresample.utils import proj4_radius_parameters - area = self.reader.get_area_def(DatasetID('VIS006')) + area = self.reader.get_area_def(make_dataid(name='VIS006')) proj_dict = area.proj_dict a, b = proj4_radius_parameters(proj_dict) self.assertEqual(a, 6378169.0) @@ -341,7 +340,7 @@ def test_get_area_def(self): # Data shifted by 1.5km to N-W self.reader.mda['offset_corrected'] = False - area = self.reader.get_area_def(DatasetID('VIS006')) + area = self.reader.get_area_def(make_dataid(name='VIS006')) self.assertEqual(area.area_extent, (-77771772558.38356, -3720765402503.719, 30310525627938.438, 77771772558.38356)) @@ -421,7 +420,7 @@ def get_header_patched(self): prologue=pro, epilogue=epi, ext_calib_coefs=coefs, calib_mode='GSICS') for ch_id, ch_name in CHANNEL_NAMES.items(): - if ch_name in coefs.keys(): + if ch_name in coefs: gain, offset = coefs[ch_name]['gain'], coefs[ch_name]['offset'] elif ch_name not in VIS_CHANNELS: gain, offset = gsics_gain[ch_id - 1], gsics_offset[ch_id - 1] @@ -442,7 +441,7 @@ def get_header_patched(self): @mock.patch('satpy.readers.seviri_l1b_hrit.HRITMSGFileHandler.calibrate') def test_get_dataset(self, calibrate, parent_get_dataset, _get_timestamps): """Test getting the dataset.""" - key = mock.MagicMock(calibration='calibration') + key = make_dataid(name='VIS006', calibration='reflectance') info = {'units': 'units', 'wavelength': 'wavelength', 'standard_name': 'standard_name'} timestamps = np.array([1, 2, 3], dtype='datetime64[ns]') @@ -454,7 +453,7 @@ def test_get_dataset(self, calibrate, parent_get_dataset, _get_timestamps): # Test method calls parent_get_dataset.assert_called_with(key, info) - calibrate.assert_called_with(parent_get_dataset(), key.calibration) + calibrate.assert_called_with(parent_get_dataset(), key['calibration']) # Test attributes (just check if raw metadata is there and then remove it before checking the remaining # attributes) @@ -513,6 +512,7 @@ def test_get_timestamps(self): self.assertTrue(np.all(msec[1:-1] == np.arange(len(tline) - 2))) def test_get_header(self): + """Test getting the header.""" # Make sure that the actual satellite position is only included if available self.reader.mda['orbital_parameters'] = {} self.reader.prologue_.get_satpos.return_value = 1, 2, 3 diff --git a/satpy/tests/reader_tests/test_seviri_l1b_native.py b/satpy/tests/reader_tests/test_seviri_l1b_native.py index 474d0d685b..7fdcae7de4 100644 --- a/satpy/tests/reader_tests/test_seviri_l1b_native.py +++ b/satpy/tests/reader_tests/test_seviri_l1b_native.py @@ -26,7 +26,9 @@ NativeMSGFileHandler, get_available_channels, ) -from satpy.dataset import DatasetID + + +from satpy.tests.utils import 
make_dataid CHANNEL_INDEX_LIST = ['VIS006', 'VIS008', 'IR_016', 'IR_039', @@ -51,7 +53,7 @@ TEST_AREA_EXTENT_EARTHMODEL1_VISIR_FULLDISK = { 'earth_model': 1, - 'dataset_id': DatasetID(name='VIS006'), + 'dataset_id': make_dataid(name='VIS006'), 'is_full_disk': True, 'is_rapid_scan': 0, 'expected_area_def': { @@ -69,7 +71,7 @@ TEST_AREA_EXTENT_EARTHMODEL1_VISIR_ROI = { 'earth_model': 1, - 'dataset_id': DatasetID(name='VIS006'), + 'dataset_id': make_dataid(name='VIS006'), 'is_full_disk': False, 'is_rapid_scan': 0, 'expected_area_def': { @@ -87,7 +89,7 @@ TEST_AREA_EXTENT_EARTHMODEL1_HRV_FULLDISK = { 'earth_model': 1, - 'dataset_id': DatasetID(name='HRV'), + 'dataset_id': make_dataid(name='HRV'), 'is_full_disk': True, 'is_rapid_scan': 0, 'expected_area_def': { @@ -106,7 +108,7 @@ TEST_AREA_EXTENT_EARTHMODEL1_HRV_RAPIDSCAN = { 'earth_model': 1, - 'dataset_id': DatasetID(name='HRV'), + 'dataset_id': make_dataid(name='HRV'), 'is_full_disk': False, 'is_rapid_scan': 1, 'expected_area_def': { @@ -124,7 +126,7 @@ TEST_AREA_EXTENT_EARTHMODEL1_HRV_ROI = { 'earth_model': 1, - 'dataset_id': DatasetID(name='HRV'), + 'dataset_id': make_dataid(name='HRV'), 'is_full_disk': False, 'is_rapid_scan': 0, 'expected_area_def': { @@ -142,7 +144,7 @@ TEST_AREA_EXTENT_EARTHMODEL2_VISIR_FULLDISK = { 'earth_model': 2, - 'dataset_id': DatasetID(name='VIS006'), + 'dataset_id': make_dataid(name='VIS006'), 'is_full_disk': True, 'is_rapid_scan': 0, 'expected_area_def': { @@ -160,7 +162,7 @@ TEST_AREA_EXTENT_EARTHMODEL2_HRV_FULLDISK = { 'earth_model': 2, - 'dataset_id': DatasetID(name='HRV'), + 'dataset_id': make_dataid(name='HRV'), 'is_full_disk': True, 'is_rapid_scan': 0, 'expected_area_def': { @@ -179,7 +181,7 @@ TEST_AREA_EXTENT_EARTHMODEL2_HRV_RAPIDSCAN = { 'earth_model': 2, - 'dataset_id': DatasetID(name='HRV'), + 'dataset_id': make_dataid(name='HRV'), 'is_full_disk': False, 'is_rapid_scan': 1, 'expected_area_def': { @@ -197,7 +199,7 @@ TEST_AREA_EXTENT_EARTHMODEL2_VISIR_ROI = { 'earth_model': 2, - 'dataset_id': DatasetID(name='VIS006'), + 'dataset_id': make_dataid(name='VIS006'), 'is_full_disk': False, 'is_rapid_scan': 0, 'expected_area_def': { @@ -215,7 +217,7 @@ TEST_AREA_EXTENT_EARTHMODEL2_HRV_ROI = { 'earth_model': 2, - 'dataset_id': DatasetID(name='HRV'), + 'dataset_id': make_dataid(name='HRV'), 'is_full_disk': False, 'is_rapid_scan': 0, 'expected_area_def': { @@ -233,7 +235,7 @@ TEST_CALIBRATION_MODE = { 'earth_model': 1, - 'dataset_id': DatasetID(name='IR_108', calibration='radiance'), + 'dataset_id': make_dataid(name='IR_108', calibration='radiance'), 'is_full_disk': True, 'is_rapid_scan': 0, 'calibration': 'radiance', @@ -294,7 +296,7 @@ def create_test_header(earth_model, dataset_id, is_full_disk, is_rapid_scan): Contains sufficient attributes for NativeMSGFileHandler.get_area_extent to be able to execute. 
""" - if dataset_id.name == 'HRV': + if dataset_id['name'] == 'HRV': reference_grid = 'ReferenceGridHRV' column_dir_grid_step = 1.0001343488693237 line_dir_grid_step = 1.0001343488693237 @@ -456,7 +458,7 @@ def test_earthmodel1_hrv_rapidscan(self): calculated, expected = self.prepare_area_defs( TEST_AREA_EXTENT_EARTHMODEL1_HRV_RAPIDSCAN ) - print(calculated.area_extent) + assertNumpyArraysEqual(np.array(calculated.area_extent), np.array(expected['Area extent'])) @@ -560,7 +562,7 @@ def create_test_header(earth_model, dataset_id, is_full_disk, is_rapid_scan): Mocked NativeMSGFileHandler with sufficient attributes for NativeMSGFileHandler._convert_to_radiance and NativeMSGFileHandler.calibrate to be able to execute. """ - if dataset_id.name == 'HRV': + if dataset_id['name'] == 'HRV': # reference_grid = 'ReferenceGridHRV' column_dir_grid_step = 1.0001343488693237 line_dir_grid_step = 1.0001343488693237 @@ -656,7 +658,7 @@ def calibration_mode_test(self, test_dict, cal_mode): earth_model = test_dict['earth_model'] dataset_id = test_dict['dataset_id'] - index = CHANNEL_INDEX_LIST.index(dataset_id.name) + index = CHANNEL_INDEX_LIST.index(dataset_id['name']) # determine the cal coeffs needed for the expected data calculation if cal_mode == 'nominal': diff --git a/satpy/tests/reader_tests/test_seviri_l1b_nc.py b/satpy/tests/reader_tests/test_seviri_l1b_nc.py index b09085e4c1..93c3741983 100644 --- a/satpy/tests/reader_tests/test_seviri_l1b_nc.py +++ b/satpy/tests/reader_tests/test_seviri_l1b_nc.py @@ -25,9 +25,11 @@ import xarray as xr from satpy.readers.seviri_l1b_nc import NCSEVIRIFileHandler +from satpy.tests.utils import make_dataid def new_read_file(instance): + """Fake read file.""" new_ds = xr.Dataset({'ch4': (['num_rows_vis_ir', 'num_columns_vis_ir'], np.random.random((2, 2))), 'planned_chan_processing': (["channels_dim"], np.ones(12, dtype=np.int8) * 2)}, coords={'num_rows_vis_ir': [1, 2], 'num_columns_vis_ir': [1, 2]}) @@ -48,8 +50,10 @@ def new_read_file(instance): class TestNCSEVIRIFileHandler(unittest.TestCase): + """Tester for the file handler.""" def setUp(self): + """Set up the test case.""" with mock.patch.object(NCSEVIRIFileHandler, '_read_file', new=new_read_file): self.reader = NCSEVIRIFileHandler( 'filename', @@ -60,8 +64,7 @@ def setUp(self): def test_get_dataset_remove_attrs(self): """Test getting the hrv dataset.""" - dataset_id = mock.MagicMock(calibration='counts') - dataset_id.name = 'IR_039' + dataset_id = make_dataid(name='IR_039', calibration='counts') dataset_info = {'nc_key': 'ch4', 'units': 'units', 'wavelength': 'wavelength', 'standard_name': 'standard_name'} res = self.reader.get_dataset(dataset_id, dataset_info) diff --git a/satpy/tests/reader_tests/test_smos_l2_wind.py b/satpy/tests/reader_tests/test_smos_l2_wind.py index bde2e41eef..17978c7968 100644 --- a/satpy/tests/reader_tests/test_smos_l2_wind.py +++ b/satpy/tests/reader_tests/test_smos_l2_wind.py @@ -28,9 +28,10 @@ class FakeNetCDF4FileHandlerSMOSL2WIND(FakeNetCDF4FileHandler): - """Swap-in NetCDF4 File Handler""" + """Swap-in NetCDF4 File Handler.""" + def get_test_content(self, filename, filename_info, filetype_info): - """Mimic reader input file content""" + """Mimic reader input file content.""" from xarray import DataArray dt_s = filename_info.get('start_time', datetime(2020, 4, 22, 12, 0, 0)) dt_e = filename_info.get('end_time', datetime(2020, 4, 22, 12, 0, 0)) @@ -67,17 +68,18 @@ def get_test_content(self, filename, filename_info, filetype_info): file_content['wind_speed'].attrs['_FillValue'] = 
-999.0 else: - assert False + raise AssertionError() return file_content class TestSMOSL2WINDReader(unittest.TestCase): - """Test SMOS L2 WINDReader""" + """Test SMOS L2 WINDReader.""" + yaml_file = "smos_l2_wind.yaml" def setUp(self): - """Wrap NetCDF4 file handler with our own fake handler""" + """Wrap NetCDF4 file handler with our own fake handler.""" from satpy.config import config_search_paths from satpy.readers.smos_l2_wind import SMOSL2WINDFileHandler self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file)) @@ -87,7 +89,7 @@ def setUp(self): self.p.is_local = True def tearDown(self): - """Stop wrapping the NetCDF4 file handler""" + """Stop wrapping the NetCDF4 file handler.""" self.p.stop() def test_init(self): @@ -103,7 +105,7 @@ def test_init(self): self.assertTrue(r.file_handlers) def test_load_wind_speed(self): - """Load wind_speed dataset""" + """Load wind_speed dataset.""" from satpy.readers import load_reader r = load_reader(self.reader_configs) with mock.patch('satpy.readers.smos_l2_wind.netCDF4.Variable', xr.DataArray): @@ -125,7 +127,7 @@ def test_load_wind_speed(self): self.assertEqual(d.y[d.shape[0] - 1].data, 89.75) def test_load_lat(self): - """Load lat dataset""" + """Load lat dataset.""" from satpy.readers import load_reader r = load_reader(self.reader_configs) with mock.patch('satpy.readers.smos_l2_wind.netCDF4.Variable', xr.DataArray): @@ -142,7 +144,7 @@ def test_load_lat(self): self.assertEqual(d.data[d.shape[0] - 1], 89.75) def test_load_lon(self): - """Load lon dataset""" + """Load lon dataset.""" from satpy.readers import load_reader r = load_reader(self.reader_configs) with mock.patch('satpy.readers.smos_l2_wind.netCDF4.Variable', xr.DataArray): @@ -159,7 +161,7 @@ def test_load_lon(self): self.assertEqual(d.data[d.shape[0] - 1], 179.75) def test_adjust_lon(self): - """Load adjust longitude dataset""" + """Load adjust longitude dataset.""" from xarray import DataArray from satpy.readers.smos_l2_wind import SMOSL2WINDFileHandler smos_l2_wind_fh = SMOSL2WINDFileHandler('SM_OPER_MIR_SCNFSW_20200420T021649_20200420T035013_110_001_7.nc', @@ -172,7 +174,7 @@ def test_adjust_lon(self): self.assertEqual(adjusted.data.tolist(), expected.data.tolist()) def test_roll_dataset(self): - """Load roll of dataset along the lon coordinate""" + """Load roll of dataset along the lon coordinate.""" from xarray import DataArray from satpy.readers.smos_l2_wind import SMOSL2WINDFileHandler smos_l2_wind_fh = SMOSL2WINDFileHandler('SM_OPER_MIR_SCNFSW_20200420T021649_20200420T035013_110_001_7.nc', diff --git a/satpy/tests/reader_tests/test_vaisala_gld360.py b/satpy/tests/reader_tests/test_vaisala_gld360.py index 4dcffdbb60..32a6d052b5 100644 --- a/satpy/tests/reader_tests/test_vaisala_gld360.py +++ b/satpy/tests/reader_tests/test_vaisala_gld360.py @@ -22,7 +22,7 @@ import numpy as np from satpy.readers.vaisala_gld360 import VaisalaGLD360TextFileHandler -from satpy.dataset import DatasetID +from satpy.tests.utils import make_dataid import unittest @@ -32,7 +32,6 @@ class TestVaisalaGLD360TextFileHandler(unittest.TestCase): def test_vaisala_gld360(self): """Test basic functionality for vaisala file handler.""" - expected = np.array([12.3, 13.2, -31.]) filename = StringIO( @@ -48,7 +47,7 @@ def test_vaisala_gld360(self): ) filename.close() - dataset_id = DatasetID('power') + dataset_id = make_dataid(name='power') dataset_info = {'units': 'kA'} result = self.handler.get_dataset(dataset_id, dataset_info).values diff --git 
a/satpy/tests/reader_tests/test_viirs_compact.py b/satpy/tests/reader_tests/test_viirs_compact.py index 7247f64d22..29161a310d 100644 --- a/satpy/tests/reader_tests/test_viirs_compact.py +++ b/satpy/tests/reader_tests/test_viirs_compact.py @@ -2438,18 +2438,18 @@ def fill_h5(root, stuff): def test_get_dataset(self): """Retrieve datasets from a DNB file.""" from satpy.readers.viirs_compact import VIIRSCompactFileHandler - from satpy import DatasetID + from satpy.tests.utils import make_dataid filename_info = {} filetype_info = {'file_type': 'compact_dnb'} - dsid = DatasetID(name='DNB', calibration='radiance') + dsid = make_dataid(name='DNB', calibration='radiance') test = VIIRSCompactFileHandler(self.filename, filename_info, filetype_info) ds = test.get_dataset(dsid, {}) self.assertEqual(ds.shape, (752, 4064)) self.assertEqual(ds.dtype, np.float32) self.assertEqual(ds.attrs['rows_per_scan'], 16) - dsid = DatasetID(name='longitude_dnb') + dsid = make_dataid(name='longitude_dnb') ds = test.get_dataset(dsid, {'standard_name': 'longitude'}) self.assertEqual(ds.shape, (752, 4064)) self.assertEqual(ds.dtype, np.float32) diff --git a/satpy/tests/reader_tests/test_viirs_l1b.py b/satpy/tests/reader_tests/test_viirs_l1b.py index b2360e48de..0510e92823 100644 --- a/satpy/tests/reader_tests/test_viirs_l1b.py +++ b/satpy/tests/reader_tests/test_viirs_l1b.py @@ -37,9 +37,10 @@ class FakeNetCDF4FileHandler2(FakeNetCDF4FileHandler): - """Swap-in NetCDF4 File Handler""" + """Swap-in NetCDF4 File Handler.""" + def get_test_content(self, filename, filename_info, filetype_info): - """Mimic reader input file content""" + """Mimic reader input file content.""" dt = filename_info.get('start_time', datetime(2016, 1, 1, 12, 0, 0)) file_type = filename[:5].lower() # num_lines = { @@ -131,11 +132,12 @@ def get_test_content(self, filename, filename_info, filetype_info): class TestVIIRSL1BReader(unittest.TestCase): - """Test VIIRS L1B Reader""" + """Test VIIRS L1B Reader.""" + yaml_file = "viirs_l1b.yaml" def setUp(self): - """Wrap NetCDF4 file handler with our own fake handler""" + """Wrap NetCDF4 file handler with our own fake handler.""" from satpy.config import config_search_paths from satpy.readers.viirs_l1b import VIIRSL1BFileHandler self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file)) @@ -145,7 +147,7 @@ def setUp(self): self.p.is_local = True def tearDown(self): - """Stop wrapping the NetCDF4 file handler""" + """Stop wrapping the NetCDF4 file handler.""" self.p.stop() def test_init(self): @@ -161,7 +163,7 @@ def test_init(self): self.assertTrue(r.file_handlers) def test_load_every_m_band_bt(self): - """Test loading all M band brightness temperatures""" + """Test loading all M band brightness temperatures.""" from satpy.readers import load_reader r = load_reader(self.reader_configs) loadables = r.select_files_from_pathnames([ @@ -183,7 +185,7 @@ def test_load_every_m_band_bt(self): self.assertEqual(v.attrs['area'].lats.attrs['rows_per_scan'], 2) def test_load_every_m_band_refl(self): - """Test loading all M band reflectances""" + """Test loading all M band reflectances.""" from satpy.readers import load_reader r = load_reader(self.reader_configs) loadables = r.select_files_from_pathnames([ @@ -211,31 +213,31 @@ def test_load_every_m_band_refl(self): self.assertEqual(v.attrs['area'].lats.attrs['rows_per_scan'], 2) def test_load_every_m_band_rad(self): - """Test loading all M bands as radiances""" + """Test loading all M bands as radiances.""" from satpy.readers import load_reader - 
from satpy import DatasetID + from satpy.tests.utils import make_dataid r = load_reader(self.reader_configs) loadables = r.select_files_from_pathnames([ 'VL1BM_snpp_d20161130_t012400_c20161130054822.nc', 'VGEOM_snpp_d20161130_t012400_c20161130054822.nc', ]) r.create_filehandlers(loadables) - datasets = r.load([DatasetID('M01', calibration='radiance'), - DatasetID('M02', calibration='radiance'), - DatasetID('M03', calibration='radiance'), - DatasetID('M04', calibration='radiance'), - DatasetID('M05', calibration='radiance'), - DatasetID('M06', calibration='radiance'), - DatasetID('M07', calibration='radiance'), - DatasetID('M08', calibration='radiance'), - DatasetID('M09', calibration='radiance'), - DatasetID('M10', calibration='radiance'), - DatasetID('M11', calibration='radiance'), - DatasetID('M12', calibration='radiance'), - DatasetID('M13', calibration='radiance'), - DatasetID('M14', calibration='radiance'), - DatasetID('M15', calibration='radiance'), - DatasetID('M16', calibration='radiance')]) + datasets = r.load([make_dataid(name='M01', calibration='radiance'), + make_dataid(name='M02', calibration='radiance'), + make_dataid(name='M03', calibration='radiance'), + make_dataid(name='M04', calibration='radiance'), + make_dataid(name='M05', calibration='radiance'), + make_dataid(name='M06', calibration='radiance'), + make_dataid(name='M07', calibration='radiance'), + make_dataid(name='M08', calibration='radiance'), + make_dataid(name='M09', calibration='radiance'), + make_dataid(name='M10', calibration='radiance'), + make_dataid(name='M11', calibration='radiance'), + make_dataid(name='M12', calibration='radiance'), + make_dataid(name='M13', calibration='radiance'), + make_dataid(name='M14', calibration='radiance'), + make_dataid(name='M15', calibration='radiance'), + make_dataid(name='M16', calibration='radiance')]) self.assertEqual(len(datasets), 16) for v in datasets.values(): self.assertEqual(v.attrs['calibration'], 'radiance') @@ -245,7 +247,7 @@ def test_load_every_m_band_rad(self): self.assertEqual(v.attrs['area'].lats.attrs['rows_per_scan'], 2) def test_load_dnb_radiance(self): - """Test loading the main DNB dataset""" + """Test loading the main DNB dataset.""" from satpy.readers import load_reader r = load_reader(self.reader_configs) loadables = r.select_files_from_pathnames([ diff --git a/satpy/tests/reader_tests/test_viirs_sdr.py b/satpy/tests/reader_tests/test_viirs_sdr.py index 77bfc10f4d..0ca1c929ae 100644 --- a/satpy/tests/reader_tests/test_viirs_sdr.py +++ b/satpy/tests/reader_tests/test_viirs_sdr.py @@ -480,7 +480,7 @@ def test_load_all_m_bts(self): def test_load_all_m_radiances(self): """Load all M band radiances.""" from satpy.readers import load_reader - from satpy import DatasetID + from satpy.tests.utils import make_dsq r = load_reader(self.reader_configs) loadables = r.select_files_from_pathnames([ 'SVM01_npp_d20120225_t1801245_e1802487_b01708_c20120226002130255476_noaa_ops.h5', @@ -503,22 +503,22 @@ def test_load_all_m_radiances(self): ]) r.create_filehandlers(loadables) ds = r.load([ - DatasetID(name='M01', calibration='radiance', modifiers=None), - DatasetID(name='M02', calibration='radiance', modifiers=None), - DatasetID(name='M03', calibration='radiance', modifiers=None), - DatasetID(name='M04', calibration='radiance', modifiers=None), - DatasetID(name='M05', calibration='radiance', modifiers=None), - DatasetID(name='M06', calibration='radiance', modifiers=None), - DatasetID(name='M07', calibration='radiance', modifiers=None), - DatasetID(name='M08', 
calibration='radiance', modifiers=None), - DatasetID(name='M09', calibration='radiance', modifiers=None), - DatasetID(name='M10', calibration='radiance', modifiers=None), - DatasetID(name='M11', calibration='radiance', modifiers=None), - DatasetID(name='M12', calibration='radiance', modifiers=None), - DatasetID(name='M13', calibration='radiance', modifiers=None), - DatasetID(name='M14', calibration='radiance', modifiers=None), - DatasetID(name='M15', calibration='radiance', modifiers=None), - DatasetID(name='M16', calibration='radiance', modifiers=None), + make_dsq(name='M01', calibration='radiance'), + make_dsq(name='M02', calibration='radiance'), + make_dsq(name='M03', calibration='radiance'), + make_dsq(name='M04', calibration='radiance'), + make_dsq(name='M05', calibration='radiance'), + make_dsq(name='M06', calibration='radiance'), + make_dsq(name='M07', calibration='radiance'), + make_dsq(name='M08', calibration='radiance'), + make_dsq(name='M09', calibration='radiance'), + make_dsq(name='M10', calibration='radiance'), + make_dsq(name='M11', calibration='radiance'), + make_dsq(name='M12', calibration='radiance'), + make_dsq(name='M13', calibration='radiance'), + make_dsq(name='M14', calibration='radiance'), + make_dsq(name='M15', calibration='radiance'), + make_dsq(name='M16', calibration='radiance'), ]) self.assertEqual(len(ds), 16) for d in ds.values(): @@ -555,7 +555,7 @@ def test_load_i_no_files(self): 'GDNBO_npp_d20120225_t1801245_e1802487_b01708_c20120226002130255476_noaa_ops.h5', ]) r.create_filehandlers(loadables) - self.assertNotIn('I01', [x.name for x in r.available_dataset_ids]) + self.assertNotIn('I01', [x['name'] for x in r.available_dataset_ids]) ds = r.load(['I01']) self.assertEqual(len(ds), 0) @@ -610,7 +610,7 @@ def test_load_all_i_bts(self): def test_load_all_i_radiances(self): """Load all I band radiances.""" from satpy.readers import load_reader - from satpy import DatasetID + from satpy.tests.utils import make_dsq r = load_reader(self.reader_configs) loadables = r.select_files_from_pathnames([ 'SVI01_npp_d20120225_t1801245_e1802487_b01708_c20120226002130255476_noaa_ops.h5', @@ -622,11 +622,11 @@ def test_load_all_i_radiances(self): ]) r.create_filehandlers(loadables) ds = r.load([ - DatasetID(name='I01', calibration='radiance', modifiers=None), - DatasetID(name='I02', calibration='radiance', modifiers=None), - DatasetID(name='I03', calibration='radiance', modifiers=None), - DatasetID(name='I04', calibration='radiance', modifiers=None), - DatasetID(name='I05', calibration='radiance', modifiers=None), + make_dsq(name='I01', calibration='radiance'), + make_dsq(name='I02', calibration='radiance'), + make_dsq(name='I03', calibration='radiance'), + make_dsq(name='I04', calibration='radiance'), + make_dsq(name='I05', calibration='radiance'), ]) self.assertEqual(len(ds), 5) for d in ds.values(): diff --git a/satpy/tests/reader_tests/test_virr_l1b.py b/satpy/tests/reader_tests/test_virr_l1b.py index ddbaa22fd3..1fa3e57454 100644 --- a/satpy/tests/reader_tests/test_virr_l1b.py +++ b/satpy/tests/reader_tests/test_virr_l1b.py @@ -130,23 +130,23 @@ def _fy3_helper(self, platform_name, reader, Emissive_units): datasets = reader.load([band for band in band_values]) for dataset in datasets: # Object returned by get_dataset. 
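# DataID behaves like a mapping, so metadata such as the band name is read with
# item access (dataset['name']) rather than the old attribute access (dataset.name) below.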
- ds = datasets[dataset.name] + ds = datasets[dataset['name']] attributes = ds.attrs self.assertTrue(isinstance(ds.data, da.Array)) self.assertEqual('virr', attributes['sensor']) self.assertEqual(platform_name, attributes['platform_name']) self.assertEqual(datetime.datetime(2018, 12, 25, 21, 41, 47, 90000), attributes['start_time']) self.assertEqual(datetime.datetime(2018, 12, 25, 21, 47, 28, 254000), attributes['end_time']) - self.assertEqual((19, 20), datasets[dataset.name].shape) - self.assertEqual(('y', 'x'), datasets[dataset.name].dims) - if dataset.name in ['1', '2', '6', '7', '8', '9', '10']: + self.assertEqual((19, 20), datasets[dataset['name']].shape) + self.assertEqual(('y', 'x'), datasets[dataset['name']].dims) + if dataset['name'] in ['1', '2', '6', '7', '8', '9', '10']: self._band_helper(attributes, '%', 'reflectance', 'toa_bidirectional_reflectance', 'virr_l1b', 7, 1000) - elif dataset.name in ['3', '4', '5']: + elif dataset['name'] in ['3', '4', '5']: self._band_helper(attributes, Emissive_units, 'brightness_temperature', 'toa_brightness_temperature', 'virr_l1b', 3, 1000) - elif dataset.name in ['longitude', 'latitude']: + elif dataset['name'] in ['longitude', 'latitude']: self.assertEqual('degrees', attributes['units']) self.assertTrue(attributes['standard_name'] in ['longitude', 'latitude']) self.assertEqual(['virr_l1b', 'virr_geoxx'], attributes['file_type']) @@ -158,7 +158,7 @@ def _fy3_helper(self, platform_name, reader, Emissive_units): 'sensor_azimuth_angle']) self.assertEqual(['virr_geoxx', 'virr_l1b'], attributes['file_type']) self.assertEqual(('longitude', 'latitude'), attributes['coordinates']) - self.assertEqual(band_values[dataset.name], + self.assertEqual(band_values[dataset['name']], round(float(np.array(ds[ds.shape[0] // 2][ds.shape[1] // 2])), 6)) def test_fy3b_file(self): diff --git a/satpy/tests/test_composites.py b/satpy/tests/test_composites.py index ee7d254661..2345a6b833 100644 --- a/satpy/tests/test_composites.py +++ b/satpy/tests/test_composites.py @@ -502,15 +502,15 @@ def test_inline_composites(self): comps = cl_.compositors # Check that "fog" product has all its prerequisites defined keys = comps['visir'].keys() - fog = [comps['visir'][dsid] for dsid in keys if "fog" == dsid.name][0] - self.assertEqual(fog.attrs['prerequisites'][0], '_fog_dep_0') - self.assertEqual(fog.attrs['prerequisites'][1], '_fog_dep_1') + fog = [comps['visir'][dsid] for dsid in keys if "fog" == dsid['name']][0] + self.assertEqual(fog.attrs['prerequisites'][0]['name'], '_fog_dep_0') + self.assertEqual(fog.attrs['prerequisites'][1]['name'], '_fog_dep_1') self.assertEqual(fog.attrs['prerequisites'][2], 10.8) # Check that the sub-composite dependencies use wavelengths # (numeric values) keys = comps['visir'].keys() - fog_dep_ids = [dsid for dsid in keys if "fog_dep" in dsid.name] + fog_dep_ids = [dsid for dsid in keys if "fog_dep" in dsid['name']] self.assertEqual(comps['visir'][fog_dep_ids[0]].attrs['prerequisites'], [12.0, 10.8]) self.assertEqual(comps['visir'][fog_dep_ids[1]].attrs['prerequisites'], @@ -522,7 +522,7 @@ def test_inline_composites(self): cl_.load_sensor_composites('seviri') comps = cl_.compositors keys = comps['seviri'].keys() - fog_dep_ids = [dsid for dsid in keys if "fog_dep" in dsid.name] + fog_dep_ids = [dsid for dsid in keys if "fog_dep" in dsid['name']] self.assertEqual(comps['seviri'][fog_dep_ids[0]].attrs['prerequisites'], ['IR_120', 'IR_108']) self.assertEqual(comps['seviri'][fog_dep_ids[1]].attrs['prerequisites'], diff --git 
a/satpy/tests/test_dataset.py b/satpy/tests/test_dataset.py index 82bd360dc2..26a0c6a90c 100644 --- a/satpy/tests/test_dataset.py +++ b/satpy/tests/test_dataset.py @@ -17,41 +17,58 @@ # satpy. If not, see . """Test objects and functions in the dataset module.""" -from datetime import datetime import unittest +from datetime import datetime + +import numpy as np +import pytest +from satpy.tests.utils import make_cid, make_dataid, make_dsq -class TestDatasetID(unittest.TestCase): - """Test DatasetID object creation and other methods.""" + +class TestDataID(unittest.TestCase): + """Test DataID object creation and other methods.""" def test_basic_init(self): - """Test basic ways of creating a DatasetID.""" - from satpy.dataset import DatasetID - DatasetID(name="a") - DatasetID(name="a", wavelength=0.86) - DatasetID(name="a", resolution=1000) - DatasetID(name="a", calibration='radiance') - DatasetID(name="a", wavelength=0.86, resolution=250, - calibration='radiance') - DatasetID(name="a", wavelength=0.86, resolution=250, - calibration='radiance', modifiers=('sunz_corrected',)) - DatasetID(wavelength=0.86) + """Test basic ways of creating a DataID.""" + from satpy.dataset import DataID, default_id_keys_config as dikc, minimal_default_keys_config as mdkc + + did = DataID(dikc, name="a") + assert did['name'] == 'a' + assert did['modifiers'] == tuple() + DataID(dikc, name="a", wavelength=0.86) + DataID(dikc, name="a", resolution=1000) + DataID(dikc, name="a", calibration='radiance') + DataID(dikc, name="a", wavelength=0.86, resolution=250, + calibration='radiance') + DataID(dikc, name="a", wavelength=0.86, resolution=250, + calibration='radiance', modifiers=('sunz_corrected',)) + with pytest.raises(ValueError): + DataID(dikc, wavelength=0.86) + did = DataID(mdkc, name='comp24', resolution=500) + assert did['resolution'] == 500 def test_init_bad_modifiers(self): """Test that modifiers are a tuple.""" - from satpy.dataset import DatasetID - self.assertRaises(TypeError, DatasetID, name="a", modifiers="str") + from satpy.dataset import DataID, default_id_keys_config as dikc + self.assertRaises(TypeError, DataID, dikc, name="a", modifiers="str") def test_compare_no_wl(self): """Compare fully qualified wavelength ID to no wavelength ID.""" - from satpy.dataset import DatasetID - d1 = DatasetID(name="a", wavelength=(0.1, 0.2, 0.3)) - d2 = DatasetID(name="a", wavelength=None) + from satpy.dataset import DataID, default_id_keys_config as dikc + d1 = DataID(dikc, name="a", wavelength=(0.1, 0.2, 0.3)) + d2 = DataID(dikc, name="a", wavelength=None) # this happens when sorting IDs during dependency checks self.assertFalse(d1 < d2) self.assertTrue(d2 < d1) + def test_bad_calibration(self): + """Test that asking for a bad calibration fails.""" + from satpy.dataset import DataID, default_id_keys_config as dikc + with pytest.raises(ValueError): + DataID(dikc, name='C05', calibration='_bad_') + class TestCombineMetadata(unittest.TestCase): """Test how metadata is combined.""" @@ -120,3 +137,254 @@ def test_combine_arrays(self): object() ] assert "quality" not in combine_metadata(*dts6) + + +def test_dataid(): + """Test the DataID object.""" + from satpy.dataset import DataID, WavelengthRange, ModifierTuple, ValueList + + # Check that enum is translated to type. 
+ did = make_dataid() + assert issubclass(did._id_keys['calibration']['type'], ValueList) + assert 'enum' not in did._id_keys['calibration'] + + # Check that None is never a valid value + did = make_dataid(name='cheese_shops', resolution=None) + assert 'resolution' not in did + assert 'None' not in did.__repr__() + with pytest.raises(ValueError): + make_dataid(name=None, resolution=1000) + + # Check that defaults are applied correctly + assert did['modifiers'] == ModifierTuple() + + # Check that from_dict creates a distinct instance... + did2 = did.from_dict(dict(name='cheese_shops', resolution=None)) + assert did is not did2 + # ...But is equal + assert did2 == did + + # Check that the instance is immutable + with pytest.raises(TypeError): + did['resolution'] = 1000 + + # Check that a missing required field crashes + with pytest.raises(ValueError): + make_dataid(resolution=1000) + + # Check to_dict + assert did.to_dict() == dict(name='cheese_shops', modifiers=tuple()) + + # Check repr + did = make_dataid(name='VIS008', resolution=111) + assert repr(did) == "DataID(name='VIS008', resolution=111, modifiers=())" + + # Check inequality + default_id_keys_config = {'name': None, + 'wavelength': { + 'type': WavelengthRange, + }, + 'resolution': None, + 'calibration': { + 'enum': [ + 'reflectance', + 'brightness_temperature', + 'radiance', + 'counts' + ] + }, + 'modifiers': { + 'default': ModifierTuple(), + 'type': ModifierTuple, + }, + } + assert DataID(default_id_keys_config, wavelength=10) != DataID(default_id_keys_config, name="VIS006") + + +def test_dataid_equal_if_enums_different(): + """Check that dataids with different enums but same items are equal.""" + from satpy.dataset import DataID, WavelengthRange, ModifierTuple + id_keys_config1 = {'name': None, + 'wavelength': { + 'type': WavelengthRange, + }, + 'resolution': None, + 'calibration': { + 'enum': [ + 'c1', + 'c2', + 'c3', + ] + }, + 'modifiers': { + 'default': ModifierTuple(), + 'type': ModifierTuple, + }, + } + + id_keys_config2 = {'name': None, + 'wavelength': { + 'type': WavelengthRange, + }, + 'resolution': None, + 'calibration': { + 'enum': [ + 'c1', + 'c1.5', + 'c2', + 'c2.5', + 'c3' + ] + }, + 'modifiers': { + 'default': ModifierTuple(), + 'type': ModifierTuple, + }, + } + assert DataID(id_keys_config1, name='ni', calibration='c2') == DataID(id_keys_config2, name="ni", calibration='c2') + + +def test_dataid_copy(): + """Test copying a DataID.""" + from satpy.dataset import DataID, default_id_keys_config as dikc + from copy import deepcopy + + did = DataID(dikc, name="a", resolution=1000) + did2 = deepcopy(did) + assert did2 == did + assert did2.id_keys == did.id_keys + + +def test_dataid_pickle(): + """Test dataid pickling roundtrip.""" + from satpy.tests.utils import make_dataid + import pickle + did = make_dataid(name='hi', wavelength=(10, 11, 12), resolution=1000, calibration='radiance') + assert did == pickle.loads(pickle.dumps(did)) + + +def test_dataquery(): + """Test DataQuery objects.""" + from satpy.dataset import DataQuery + + DataQuery(name='cheese_shops') + + # Check repr + did = DataQuery(name='VIS008', resolution=111) + assert repr(did) == "DataQuery(name='VIS008', resolution=111)" + + # Check inequality + assert DataQuery(wavelength=10) != DataQuery(name="VIS006") + + +def test_id_query_interactions(): + """Test interactions between DataIDs and DataQuery's.""" + from satpy.dataset import DataQuery, DataID, WavelengthRange, ModifierTuple, minimal_default_keys_config + + default_id_keys_config = {'name': { + 
'required': True, + }, + 'wavelength': { + 'type': WavelengthRange, + }, + 'resolution': None, + 'calibration': { + 'enum': [ + 'reflectance', + 'brightness_temperature', + 'radiance', + 'counts' + ] + }, + 'modifiers': { + 'default': ModifierTuple(), + 'type': ModifierTuple, + }, + } + + # Check hash equality + dq = DataQuery(modifiers=tuple(), name='cheese_shops') + did = DataID(default_id_keys_config, name='cheese_shops') + assert hash(dq) == hash(did) + + # Check did filtering + did2 = DataID(default_id_keys_config, name='ni') + res = dq.filter_dataids([did2, did]) + assert len(res) == 1 + assert res[0] == did + + dataid_container = [DataID(default_id_keys_config, + name='ds1', + resolution=250, + calibration='reflectance', + modifiers=tuple())] + dq = DataQuery(wavelength=0.22, modifiers=tuple()) + assert len(dq.filter_dataids(dataid_container)) == 0 + dataid_container = [DataID(minimal_default_keys_config, + name='natural_color')] + dq = DataQuery(name='natural_color', resolution=250) + assert len(dq.filter_dataids(dataid_container)) == 1 + + dq = make_dsq(wavelength=0.22, modifiers=('mod1',)) + did = make_cid(name='static_image') + assert len(dq.filter_dataids([did])) == 0 + + # Check did sorting + dq = DataQuery(name='cheese_shops', wavelength=2, modifiers='*') + did = DataID(default_id_keys_config, name='cheese_shops', wavelength=(1, 2, 3)) + did2 = DataID(default_id_keys_config, name='cheese_shops', wavelength=(1.1, 2.1, 3.1)) + dsids, distances = dq.sort_dataids([did2, did]) + assert list(dsids) == [did, did2] + assert np.allclose(distances, [0, 0.1]) + + dq = DataQuery(name='cheese_shops') + did = DataID(default_id_keys_config, name='cheese_shops', resolution=200) + did2 = DataID(default_id_keys_config, name='cheese_shops', resolution=400) + dsids, distances = dq.sort_dataids([did2, did]) + assert list(dsids) == [did, did2] + assert distances[0] < distances[1] + + did = DataID(default_id_keys_config, name='cheese_shops', calibration='counts') + did2 = DataID(default_id_keys_config, name='cheese_shops', calibration='reflectance') + dsids, distances = dq.sort_dataids([did2, did]) + assert list(dsids) == [did2, did] + assert distances[0] < distances[1] + + did = DataID(default_id_keys_config, name='cheese_shops', modifiers=tuple()) + did2 = DataID(default_id_keys_config, name='cheese_shops', modifiers=tuple(['out_of_stock'])) + dsids, distances = dq.sort_dataids([did2, did]) + assert list(dsids) == [did, did2] + assert distances[0] < distances[1] + + # Check (in)equality + assert DataQuery(wavelength=10) != DataID(default_id_keys_config, name="VIS006") + + +def test_wavelength_range(): + """Test the wavelength range object.""" + from satpy.dataset import WavelengthRange + + wr = WavelengthRange(1, 2, 3) + assert 1.2 == wr + assert .9 != wr + assert wr == (1, 2, 3) + assert wr == (1, 2, 3, 'µm') + + # Check containement + assert 1.2 in wr + assert .9 not in wr + assert WavelengthRange(1, 2, 3) in wr + assert WavelengthRange(1.1, 2.2, 3.3) not in wr + assert WavelengthRange(1.2, 2, 2.8) in wr + assert WavelengthRange(10, 20, 30) not in wr + assert 'bla' not in wr + assert None not in wr + wr2 = WavelengthRange(1, 2, 3, 'µm') + assert wr2 in wr + wr2 = WavelengthRange(1, 2, 3, 'nm') + with pytest.raises(NotImplementedError): + wr2 in wr + + # Check __str__ + assert str(wr) == "2 µm (1-3 µm)" + assert str(wr2) == "2 nm (1-3 nm)" diff --git a/satpy/tests/test_multiscene.py b/satpy/tests/test_multiscene.py index 6508ac10fa..6d5900bcae 100644 --- a/satpy/tests/test_multiscene.py +++ 
b/satpy/tests/test_multiscene.py @@ -18,14 +18,45 @@ """Unit tests for multiscene.py.""" import os -import tempfile import shutil -from datetime import datetime +import tempfile import unittest +from datetime import datetime from unittest import mock +from satpy.dataset import DataID, ModifierTuple, WavelengthRange + DEFAULT_SHAPE = (5, 10) +local_id_keys_config = {'name': { + 'required': True, +}, + 'wavelength': { + 'type': WavelengthRange, +}, + 'resolution': None, + 'calibration': { + 'enum': [ + 'reflectance', + 'brightness_temperature', + 'radiance', + 'counts' + ] +}, + 'polarization': None, + 'level': None, + 'modifiers': { + 'required': True, + 'default': ModifierTuple(), + 'type': ModifierTuple, +}, +} + + +def make_dataid(**items): + """Make a data id.""" + return DataID(local_id_keys_config, **items) + def _fake_get_enhanced_image(img, enhance=None, overlay=None, decorate=None): from trollimage.xrimage import XRImage @@ -61,7 +92,7 @@ def _create_test_dataset(name, shape=DEFAULT_SHAPE, area=None): return xr.DataArray( da.zeros(shape, dtype=np.float32, chunks=shape), dims=('y', 'x'), - attrs={'name': name, 'area': area}) + attrs={'name': name, 'area': area, '_satpy_id_keys': local_id_keys_config}) def _create_test_scenes(num_scenes=2, shape=DEFAULT_SHAPE, area=None): @@ -94,14 +125,14 @@ def test_init_children(self): def test_properties(self): """Test basic properties/attributes of the MultiScene.""" - from satpy import MultiScene, DatasetID + from satpy import MultiScene area = _create_test_area() scenes = _create_test_scenes(area=area) - ds1_id = DatasetID(name='ds1') - ds2_id = DatasetID(name='ds2') - ds3_id = DatasetID(name='ds3') - ds4_id = DatasetID(name='ds4') + ds1_id = make_dataid(name='ds1') + ds2_id = make_dataid(name='ds2') + ds3_id = make_dataid(name='ds3') + ds4_id = make_dataid(name='ds4') # Add a dataset to only one of the Scenes scenes[1]['ds3'] = _create_test_dataset('ds3') @@ -176,7 +207,8 @@ def test_from_files(self): assert len(mscn.scenes) == 12 def test_group(self): - from satpy import Scene, MultiScene, DatasetID + """Test group.""" + from satpy import Scene, MultiScene ds1 = _create_test_dataset(name='ds1') ds2 = _create_test_dataset(name='ds2') @@ -190,8 +222,8 @@ def test_group(self): scene2['ds4'] = ds4 multi_scene = MultiScene([scene1, scene2]) - groups = {DatasetID(name='odd', wavelength=(1, 2, 3)): ['ds1', 'ds3'], - DatasetID(name='even', wavelength=(2, 3, 4)): ['ds2', 'ds4']} + groups = {make_dataid(name='odd', wavelength=(1, 2, 3)): ['ds1', 'ds3'], + make_dataid(name='even', wavelength=(2, 3, 4)): ['ds2', 'ds4']} multi_scene.group(groups) self.assertSetEqual(multi_scene.shared_dataset_ids, set(groups.keys())) @@ -203,14 +235,13 @@ def test_add_group_aliases(self): import types from satpy.multiscene import add_group_aliases - from satpy import DatasetID from satpy import Scene # Define test scenes - ds_id1 = DatasetID('ds1', wavelength=(10.7, 10.8, 10.9)) - ds_id2 = DatasetID('ds2', wavelength=(1.9, 2.0, 2.1)) - ds_id3 = DatasetID('ds3', wavelength=(10.8, 10.9, 11.0)) - ds_id31 = DatasetID('ds31', polarization='H') + ds_id1 = make_dataid(name='ds1', wavelength=(10.7, 10.8, 10.9)) + ds_id2 = make_dataid(name='ds2', wavelength=(1.9, 2.0, 2.1)) + ds_id3 = make_dataid(name='ds3', wavelength=(10.8, 10.9, 11.0)) + ds_id31 = make_dataid(name='ds31', polarization='H') scene1 = Scene() scene1[ds_id1] = xr.DataArray([1]) @@ -222,8 +253,8 @@ def test_add_group_aliases(self): scenes = [scene1, scene2, scene3] # Define groups - g1 = DatasetID(name='g1', 
wavelength=(10, 11, 12)) - g2 = DatasetID(name='g2', wavelength=(1, 2, 3), polarization='V') + g1 = make_dataid(name='g1', wavelength=(10, 11, 12)) + g2 = make_dataid(name='g2', wavelength=(1, 2, 3), polarization='V') groups = {g1: ['ds1', 'ds3'], g2: ['ds2']} # Test adding aliases diff --git a/satpy/tests/test_readers.py b/satpy/tests/test_readers.py index 95d5a01e7b..3072e8222c 100644 --- a/satpy/tests/test_readers.py +++ b/satpy/tests/test_readers.py @@ -20,45 +20,74 @@ import os import unittest from unittest import mock +from satpy.dataset import WavelengthRange, ModifierTuple, DataID import pytest # clear the config dir environment variable so it doesn't interfere os.environ.pop("PPP_CONFIG_DIR", None) +local_id_keys_config = {'name': { + 'required': True, +}, + 'wavelength': { + 'type': WavelengthRange, +}, + 'resolution': None, + 'calibration': { + 'enum': [ + 'reflectance', + 'brightness_temperature', + 'radiance', + 'counts' + ] +}, + 'polarization': None, + 'level': None, + 'modifiers': { + 'required': True, + 'default': ModifierTuple(), + 'type': ModifierTuple, +}, +} + + +def make_dataid(**items): + """Make a data id.""" + return DataID(local_id_keys_config, **items) + class TestDatasetDict(unittest.TestCase): """Test DatasetDict and its methods.""" def setUp(self): """Create a test DatasetDict.""" - from satpy.dataset import DatasetID from satpy.readers import DatasetDict self.regular_dict = regular_dict = { - DatasetID(name="test", - wavelength=(0, 0.5, 1), - resolution=1000): "1", - DatasetID(name="testh", - wavelength=(0, 0.5, 1), - resolution=500): "1h", - DatasetID(name="test2", - wavelength=(1, 1.5, 2), - resolution=1000): "2", - DatasetID(name="test3", - wavelength=(1.2, 1.7, 2.2), - resolution=1000): "3", - DatasetID(name="test4", - calibration="radiance", - polarization="V"): "4rad", - DatasetID(name="test4", - calibration="reflectance", - polarization="H"): "4refl", - DatasetID(name="test5", - modifiers=('mod1', 'mod2')): "5_2mod", - DatasetID(name="test5", - modifiers=('mod2',)): "5_1mod", - DatasetID(name='test6', level=100): '6_100', - DatasetID(name='test6', level=200): '6_200', + make_dataid(name="test", + wavelength=(0, 0.5, 1), + resolution=1000): "1", + make_dataid(name="testh", + wavelength=(0, 0.5, 1), + resolution=500): "1h", + make_dataid(name="test2", + wavelength=(1, 1.5, 2), + resolution=1000): "2", + make_dataid(name="test3", + wavelength=(1.2, 1.7, 2.2), + resolution=1000): "3", + make_dataid(name="test4", + calibration="radiance", + polarization="V"): "4rad", + make_dataid(name="test4", + calibration="reflectance", + polarization="H"): "4refl", + make_dataid(name="test5", + modifiers=('mod1', 'mod2')): "5_2mod", + make_dataid(name="test5", + modifiers=('mod2',)): "5_1mod", + make_dataid(name='test6', level=100): '6_100', + make_dataid(name='test6', level=200): '6_200', } self.test_dict = DatasetDict(regular_dict) @@ -70,15 +99,14 @@ def test_init_noargs(self): def test_init_dict(self): """Test DatasetDict init with a regular dict argument.""" - from satpy.dataset import DatasetID from satpy.readers import DatasetDict - regular_dict = {DatasetID(name="test", wavelength=(0, 0.5, 1)): "1", } + regular_dict = {make_dataid(name="test", wavelength=(0, 0.5, 1)): "1", } d = DatasetDict(regular_dict) self.assertEqual(d, regular_dict) def test_getitem(self): """Test DatasetDict getitem with different arguments.""" - from satpy.dataset import DatasetID + from satpy.tests.utils import make_dsq d = self.test_dict # access by name self.assertEqual(d["test"], "1") 
@@ -89,29 +117,32 @@ def test_getitem(self): # access by near wavelength of another dataset self.assertEqual(d[1.65], "3") # access by name with multiple levels - self.assertEqual(d['test6'], '6_200') + self.assertEqual(d['test6'], '6_100') - self.assertEqual(d[DatasetID(wavelength=1.5)], "2") - self.assertEqual(d[DatasetID(wavelength=0.5, resolution=1000)], "1") - self.assertEqual(d[DatasetID(wavelength=0.5, resolution=500)], "1h") - self.assertEqual(d[DatasetID(name='test6', level=100)], '6_100') - self.assertEqual(d[DatasetID(name='test6', level=200)], '6_200') + self.assertEqual(d[make_dsq(wavelength=1.5)], "2") + self.assertEqual(d[make_dsq(wavelength=0.5, resolution=1000)], "1") + self.assertEqual(d[make_dsq(wavelength=0.5, resolution=500)], "1h") + self.assertEqual(d[make_dsq(name='test6', level=100)], '6_100') + self.assertEqual(d[make_dsq(name='test6', level=200)], '6_200') # higher resolution is returned self.assertEqual(d[0.5], "1h") self.assertEqual(d['test4'], '4refl') - self.assertEqual(d[DatasetID(name='test4', calibration='radiance')], '4rad') + self.assertEqual(d[make_dataid(name='test4', calibration='radiance')], '4rad') self.assertRaises(KeyError, d.getitem, '1h') + # test with full tuple + self.assertEqual(d[make_dsq(name='test', wavelength=(0, 0.5, 1), resolution=1000)], "1") + def test_get_key(self): """Test 'get_key' special functions.""" - from satpy import DatasetID from satpy.readers import get_key + from satpy.dataset import DataQuery d = self.test_dict - res1 = get_key(DatasetID(name='test4'), d, calibration='radiance') - res2 = get_key(DatasetID(name='test4'), d, calibration='radiance', + res1 = get_key(make_dataid(name='test4'), d, calibration='radiance') + res2 = get_key(make_dataid(name='test4'), d, calibration='radiance', num_results=0) - res3 = get_key(DatasetID(name='test4'), d, calibration='radiance', + res3 = get_key(make_dataid(name='test4'), d, calibration='radiance', num_results=3) self.assertEqual(len(res2), 1) self.assertEqual(len(res3), 1) @@ -119,25 +150,24 @@ def test_get_key(self): res3 = res3[0] self.assertEqual(res1, res2) self.assertEqual(res1, res3) + res1 = get_key('test4', d, query=DataQuery(polarization='V')) + self.assertEqual(res1, make_dataid(name='test4', calibration='radiance', + polarization='V')) - res1 = get_key('test4', d, polarization='V') - self.assertEqual(res1, DatasetID(name='test4', calibration='radiance', - polarization='V')) - - res1 = get_key(0.5, d, resolution=500) - self.assertEqual(res1, DatasetID(name='testh', - wavelength=(0, 0.5, 1), - resolution=500)) + res1 = get_key(0.5, d, query=DataQuery(resolution=500)) + self.assertEqual(res1, make_dataid(name='testh', + wavelength=(0, 0.5, 1), + resolution=500)) - res1 = get_key('test6', d, level=100) - self.assertEqual(res1, DatasetID(name='test6', - level=100)) + res1 = get_key('test6', d, query=DataQuery(level=100)) + self.assertEqual(res1, make_dataid(name='test6', + level=100)) res1 = get_key('test5', d) - res2 = get_key('test5', d, modifiers=('mod2',)) - res3 = get_key('test5', d, modifiers=('mod1', 'mod2',)) - self.assertEqual(res1, DatasetID(name='test5', - modifiers=('mod2',))) + res2 = get_key('test5', d, query=DataQuery(modifiers=('mod2',))) + res3 = get_key('test5', d, query=DataQuery(modifiers=('mod1', 'mod2',))) + self.assertEqual(res1, make_dataid(name='test5', + modifiers=('mod2',))) self.assertEqual(res1, res2) self.assertNotEqual(res1, res3) @@ -146,7 +176,6 @@ def test_get_key(self): def test_contains(self): """Test DatasetDict contains method.""" - from 
satpy.dataset import DatasetID d = self.test_dict self.assertIn('test', d) self.assertFalse(d.contains('test')) @@ -156,22 +185,22 @@ def test_contains(self): self.assertIn(1.5, d) self.assertIn(1.55, d) self.assertIn(1.65, d) - self.assertIn(DatasetID(name='test4', calibration='radiance'), d) + self.assertIn(make_dataid(name='test4', calibration='radiance'), d) self.assertIn('test4', d) def test_keys(self): """Test keys method of DatasetDict.""" - from satpy import DatasetID + from satpy.tests.utils import DataID d = self.test_dict self.assertEqual(len(d.keys()), len(self.regular_dict.keys())) - self.assertTrue(all(isinstance(x, DatasetID) for x in d.keys())) + self.assertTrue(all(isinstance(x, DataID) for x in d.keys())) name_keys = d.keys(names=True) self.assertListEqual(sorted(set(name_keys))[:4], [ 'test', 'test2', 'test3', 'test4']) wl_keys = tuple(d.keys(wavelengths=True)) self.assertIn((0, 0.5, 1), wl_keys) - self.assertIn((1, 1.5, 2), wl_keys) - self.assertIn((1.2, 1.7, 2.2), wl_keys) + self.assertIn((1, 1.5, 2, 'µm'), wl_keys) + self.assertIn((1.2, 1.7, 2.2, 'µm'), wl_keys) self.assertIn(None, wl_keys) def test_setitem(self): diff --git a/satpy/tests/test_regressions.py b/satpy/tests/test_regressions.py new file mode 100644 index 0000000000..58d35c8950 --- /dev/null +++ b/satpy/tests/test_regressions.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright (c) 2020 Satpy developers +# +# This file is part of satpy. +# +# satpy is free software: you can redistribute it and/or modify it under the +# terms of the GNU General Public License as published by the Free Software +# Foundation, either version 3 of the License, or (at your option) any later +# version. +# +# satpy is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR +# A PARTICULAR PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along with +# satpy. If not, see . 
+"""Test fixed bugs.""" + + +from unittest.mock import patch + +import dask.array as da +import numpy as np +from xarray import DataArray, Dataset +from satpy.tests.utils import make_dataid + + +abi_file_list = ['/data/OR_ABI-L1b-RadF-M3C01_G16_s20180722030423_e20180722041189_c20180722041235-118900_0.nc', + '/data/OR_ABI-L1b-RadF-M3C02_G16_s20180722030423_e20180722041190_c20180722041228-120000_0.nc', + '/data/OR_ABI-L1b-RadF-M3C03_G16_s20180722030423_e20180722041190_c20180722041237-119000_0.nc', + '/data/OR_ABI-L1b-RadF-M3C04_G16_s20180722030423_e20180722041189_c20180722041221.nc', + '/data/OR_ABI-L1b-RadF-M3C05_G16_s20180722030423_e20180722041190_c20180722041237-119101_0.nc', + '/data/OR_ABI-L1b-RadF-M3C06_G16_s20180722030423_e20180722041195_c20180722041227.nc', + '/data/OR_ABI-L1b-RadF-M3C07_G16_s20180722030423_e20180722041201_c20180722041238.nc', + '/data/OR_ABI-L1b-RadF-M3C08_G16_s20180722030423_e20180722041190_c20180722041238.nc', + '/data/OR_ABI-L1b-RadF-M3C09_G16_s20180722030423_e20180722041195_c20180722041256.nc', + '/data/OR_ABI-L1b-RadF-M3C10_G16_s20180722030423_e20180722041201_c20180722041250.nc', + '/data/OR_ABI-L1b-RadF-M3C11_G16_s20180722030423_e20180722041189_c20180722041254.nc', + '/data/OR_ABI-L1b-RadF-M3C12_G16_s20180722030423_e20180722041195_c20180722041256.nc', + '/data/OR_ABI-L1b-RadF-M3C13_G16_s20180722030423_e20180722041201_c20180722041259.nc', + '/data/OR_ABI-L1b-RadF-M3C14_G16_s20180722030423_e20180722041190_c20180722041258.nc', + '/data/OR_ABI-L1b-RadF-M3C15_G16_s20180722030423_e20180722041195_c20180722041259.nc', + '/data/OR_ABI-L1b-RadF-M3C16_G16_s20180722030423_e20180722041202_c20180722041259.nc'] + + +def generate_fake_abi_xr_dataset(filename, chunks=None, **kwargs): + """Create a fake xarray dataset for abi data. + + This is an incomplete copy of existing file structures. 
+ """ + dataset = Dataset(attrs={ + 'time_coverage_start': '2018-03-13T20:30:42.3Z', + 'time_coverage_end': '2018-03-13T20:41:18.9Z', + }) + + projection = DataArray( + [-214748364], + attrs={ + 'long_name': 'GOES-R ABI fixed grid projection', + 'grid_mapping_name': 'geostationary', + 'perspective_point_height': 35786023.0, + 'semi_major_axis': 6378137.0, + 'semi_minor_axis': 6356752.31414, + 'inverse_flattening': 298.2572221, + 'latitude_of_projection_origin': 0.0, + 'longitude_of_projection_origin': -75.0, + 'sweep_angle_axis': 'x' + }) + dataset['goes_imager_projection'] = projection + + if 'C01' in filename or 'C03' in filename or 'C05' in filename: + stop = 10847 + step = 2 + scale = 2.8e-05 + offset = 0.151858 + elif 'C02' in filename: + stop = 21693 + step = 4 + scale = 1.4e-05 + offset = 0.151865 + else: + stop = 5424 + step = 1 + scale = 5.6e-05 + offset = 0.151844 + + y = DataArray( + da.arange(0, stop, step), + attrs={ + 'scale_factor': -scale, + 'add_offset': offset, + 'units': 'rad', + 'axis': 'Y', + 'long_name': 'GOES fixed grid projection y-coordinate', + 'standard_name': 'projection_y_coordinate' + }, + dims=['y']) + + dataset['y'] = y + + x = DataArray( + da.arange(0, stop, step), + attrs={ + 'scale_factor': scale, + 'add_offset': -offset, + 'units': 'rad', + 'axis': 'X', + 'long_name': 'GOES fixed grid projection x-coordinate', + 'standard_name': 'projection_x_coordinate' + }, + dims=['x']) + + dataset['x'] = x + + rad = DataArray( + da.random.randint(0, 1025, size=[len(y), len(x)], dtype=np.int16, chunks=chunks), + attrs={ + '_FillValue': np.array(1023), + 'long_name': 'ABI L1b Radiances', + 'standard_name': 'toa_outgoing_radiance_per_unit_wavelength', + '_Unsigned': 'true', + 'sensor_band_bit_depth': 10, + 'valid_range': np.array([0, 1022], dtype=np.int16), + 'scale_factor': 0.8121064, + 'add_offset': -25.936647, + 'units': 'W m-2 sr-1 um-1', + 'resolution': 'y: 0.000028 rad x: 0.000028 rad', + 'grid_mapping': 'goes_imager_projection', + 'cell_methods': 't: point area: point' + }, + dims=['y', 'x'] + ) + + dataset['Rad'] = rad + + sublat = DataArray(0.0, attrs={ + 'long_name': 'nominal satellite subpoint latitude (platform latitude)', + 'standard_name': 'latitude', + '_FillValue': -999.0, + 'units': 'degrees_north'}) + dataset['nominal_satellite_subpoint_lat'] = sublat + + sublon = DataArray(-75.0, attrs={ + 'long_name': 'nominal satellite subpoint longitude (platform longitude)', + 'standard_name': 'longitude', + '_FillValue': -999.0, + 'units': 'degrees_east'}) + + dataset['nominal_satellite_subpoint_lon'] = sublon + + satheight = DataArray(35786.023, attrs={ + 'long_name': 'nominal satellite height above GRS 80 ellipsoid (platform altitude)', + 'standard_name': 'height_above_reference_ellipsoid', + '_FillValue': -999.0, + 'units': 'km'}) + + dataset['nominal_satellite_height'] = satheight + + yaw_flip_flag = DataArray(0, attrs={ + 'long_name': 'Flag indicating the spacecraft is operating in yaw flip configuration', + '_Unsigned': 'true', + '_FillValue': np.array(-1), + 'valid_range': np.array([0, 1], dtype=np.int8), + 'units': '1', + 'flag_values': '0 1', + 'flag_meanings': 'false true'}) + + dataset['yaw_flip_flag'] = yaw_flip_flag + + return dataset + + +@patch('xarray.open_dataset') +def test_1258(fake_open_dataset): + """Save true_color from abi with radiance doesn't need two resamplings.""" + from satpy import Scene + fake_open_dataset.side_effect = generate_fake_abi_xr_dataset + + scene = Scene(abi_file_list, reader='abi_l1b') + scene.load(['true_color_nocorr', 
'C04'], calibration='radiance') + resampled_scene = scene.resample(scene.min_area(), resampler='native') + assert len(resampled_scene.keys()) == 2 + + +@patch('xarray.open_dataset') +def test_1088(fake_open_dataset): + """Check that copied arrays gets resampled.""" + from satpy import Scene + fake_open_dataset.side_effect = generate_fake_abi_xr_dataset + + scene = Scene(abi_file_list, reader='abi_l1b') + scene.load(['C04'], calibration='radiance') + + my_id = make_dataid(name='my_name', wavelength=(10, 11, 12)) + scene[my_id] = scene['C04'].copy() + resampled = scene.resample('eurol') + assert resampled[my_id].shape == (2048, 2560) + + +@patch('xarray.open_dataset') +def test_no_enums(fake_open_dataset): + """Check that no enums are inserted in the resulting attrs.""" + from satpy import Scene + from enum import Enum + fake_open_dataset.side_effect = generate_fake_abi_xr_dataset + + scene = Scene(abi_file_list, reader='abi_l1b') + scene.load(['C04'], calibration='radiance') + for value in scene['C04'].attrs.values(): + assert not isinstance(value, Enum) diff --git a/satpy/tests/test_scene.py b/satpy/tests/test_scene.py index 7cfda4e4ed..1d390d11d3 100644 --- a/satpy/tests/test_scene.py +++ b/satpy/tests/test_scene.py @@ -21,6 +21,11 @@ import unittest from unittest import mock +import pytest + +from satpy.tests.utils import (default_id_keys_config, make_cid, make_dataid, + make_dsq) + # clear the config dir environment variable so it doesn't interfere os.environ.pop("PPP_CONFIG_DIR", None) @@ -241,7 +246,7 @@ def test_iter_by_area_swath(self): scene["2"] = DataArray(np.arange(5), attrs={'area': sd}) scene["3"] = DataArray(np.arange(5)) for area_obj, ds_list in scene.iter_by_area(): - ds_list_names = set(ds.name for ds in ds_list) + ds_list_names = set(ds['name'] for ds in ds_list) if area_obj is sd: self.assertSetEqual(ds_list_names, {'1', '2'}) else: @@ -257,15 +262,27 @@ def test_bad_setitem(self): def test_setitem(self): """Test setting an item.""" - from satpy import Scene, DatasetID + from satpy import Scene + from satpy.tests.utils import make_dataid import numpy as np import xarray as xr scene = Scene() scene["1"] = ds1 = xr.DataArray(np.arange(5)) - expected_id = DatasetID.from_dict(ds1.attrs) + expected_id = make_cid(**ds1.attrs) self.assertSetEqual(set(scene.datasets.keys()), {expected_id}) self.assertSetEqual(set(scene.wishlist), {expected_id}) + did = make_dataid(name='oranges') + scene[did] = ds1 + assert 'oranges' in scene + nparray = np.arange(5*5).reshape(5, 5) + with pytest.raises(ValueError): + scene['apples'] = nparray + assert 'apples' not in scene + did = make_dataid(name='apples') + scene[did] = nparray + assert 'apples' in scene + def test_getitem(self): """Test __getitem__ with names only.""" from satpy import Scene @@ -284,36 +301,36 @@ def test_getitem(self): def test_getitem_modifiers(self): """Test __getitem__ with names and modifiers.""" - from satpy import Scene, DatasetID + from satpy import Scene from xarray import DataArray import numpy as np # Return least modified item scene = Scene() scene['1'] = ds1_m0 = DataArray(np.arange(5)) - scene[DatasetID(name='1', modifiers=('mod1',)) + scene[make_dataid(name='1', modifiers=('mod1',)) ] = ds1_m1 = DataArray(np.arange(5)) self.assertIs(scene['1'], ds1_m0) self.assertEqual(len(list(scene.keys())), 2) scene = Scene() scene['1'] = ds1_m0 = DataArray(np.arange(5)) - scene[DatasetID(name='1', modifiers=('mod1',)) + scene[make_dataid(name='1', modifiers=('mod1',)) ] = ds1_m1 = DataArray(np.arange(5)) - 
scene[DatasetID(name='1', modifiers=('mod1', 'mod2')) + scene[make_dataid(name='1', modifiers=('mod1', 'mod2')) ] = ds1_m2 = DataArray(np.arange(5)) self.assertIs(scene['1'], ds1_m0) self.assertEqual(len(list(scene.keys())), 3) scene = Scene() - scene[DatasetID(name='1', modifiers=('mod1', 'mod2')) + scene[make_dataid(name='1', modifiers=('mod1', 'mod2')) ] = ds1_m2 = DataArray(np.arange(5)) - scene[DatasetID(name='1', modifiers=('mod1',)) + scene[make_dataid(name='1', modifiers=('mod1',)) ] = ds1_m1 = DataArray(np.arange(5)) self.assertIs(scene['1'], ds1_m1) - self.assertIs(scene[DatasetID('1', modifiers=('mod1', 'mod2'))], ds1_m2) + self.assertIs(scene[make_dataid(name='1', modifiers=('mod1', 'mod2'))], ds1_m2) self.assertRaises(KeyError, scene.__getitem__, - DatasetID(name='1', modifiers=tuple())) + make_dataid(name='1', modifiers=tuple())) self.assertEqual(len(list(scene.keys())), 2) def test_getitem_slices(self): @@ -553,10 +570,11 @@ def test_aggregate(self): area_extent, ) - scene1["1"] = DataArray(np.ones((y_size, x_size))) - scene1["2"] = DataArray(np.ones((y_size, x_size)), dims=('y', 'x')) + scene1["1"] = DataArray(np.ones((y_size, x_size)), attrs={'_satpy_id_keys': default_id_keys_config}) + scene1["2"] = DataArray(np.ones((y_size, x_size)), dims=('y', 'x'), + attrs={'_satpy_id_keys': default_id_keys_config}) scene1["3"] = DataArray(np.ones((y_size, x_size)), dims=('y', 'x'), - attrs={'area': area_def}) + attrs={'area': area_def, '_satpy_id_keys': default_id_keys_config}) scene2 = scene1.aggregate(func='sum', x=2, y=2) self.assertIs(scene1['1'], scene2['1']) @@ -572,21 +590,36 @@ def test_contains(self): from xarray import DataArray import numpy as np scene = Scene() - scene["1"] = DataArray(np.arange(5), attrs={'wavelength': (0.1, 0.2, 0.3)}) + scene["1"] = DataArray(np.arange(5), attrs={'wavelength': (0.1, 0.2, 0.3), + '_satpy_id_keys': default_id_keys_config}) self.assertTrue('1' in scene) self.assertTrue(0.15 in scene) self.assertFalse('2' in scene) self.assertFalse(0.31 in scene) + scene = Scene() + scene['blueberry'] = DataArray(np.arange(5)) + scene['blackberry'] = DataArray(np.arange(5)) + scene['strawberry'] = DataArray(np.arange(5)) + scene['raspberry'] = DataArray(np.arange(5)) + # deepcode ignore replace~keys~list~compare: This is on purpose + assert make_cid(name='blueberry') in scene.keys() + assert make_cid(name='blueberry') in scene + assert 'blueberry' in scene + assert 'blueberry' not in scene.keys() + def test_delitem(self): """Test deleting an item.""" from satpy import Scene from xarray import DataArray import numpy as np scene = Scene() - scene["1"] = DataArray(np.arange(5), attrs={'wavelength': (0.1, 0.2, 0.3)}) - scene["2"] = DataArray(np.arange(5), attrs={'wavelength': (0.4, 0.5, 0.6)}) - scene["3"] = DataArray(np.arange(5), attrs={'wavelength': (0.7, 0.8, 0.9)}) + scene["1"] = DataArray(np.arange(5), attrs={'wavelength': (0.1, 0.2, 0.3), + '_satpy_id_keys': default_id_keys_config}) + scene["2"] = DataArray(np.arange(5), attrs={'wavelength': (0.4, 0.5, 0.6), + '_satpy_id_keys': default_id_keys_config}) + scene["3"] = DataArray(np.arange(5), attrs={'wavelength': (0.7, 0.8, 0.9), + '_satpy_id_keys': default_id_keys_config}) del scene['1'] del scene['3'] del scene[0.45] @@ -812,7 +845,7 @@ def test_load_no_exist(self, cri, cl): def test_load_no_exist2(self, cri, cl): """Test loading a dataset that doesn't exist then another load.""" from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID, Scene + from satpy import Scene r = 
FakeReader('fake_reader', 'fake_sensor') cri.return_value = {'fake_reader': r} comps, mods = test_composites('fake_sensor') @@ -824,13 +857,16 @@ def test_load_no_exist2(self, cri, cl): loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 0) r.load.assert_called_once_with( - set([DatasetID(name='ds9_fail_load', wavelength=(1.0, 1.1, 1.2))])) + set([make_dataid(name='ds9_fail_load', wavelength=(1.0, 1.1, 1.2))])) scene.load(['ds1']) loaded_ids = list(scene.datasets.keys()) self.assertEqual(r.load.call_count, 2) # most recent call should have only been ds1 - r.load.assert_called_with(set([DatasetID(name='ds1')])) + r.load.assert_called_with(set([make_dataid(name='ds1', + resolution=250, + calibration='reflectance', + modifiers=tuple())])) self.assertEqual(len(loaded_ids), 1) @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -838,7 +874,6 @@ def test_load_ds1_no_comps(self, cri): """Test loading one dataset with no loaded compositors.""" import satpy.scene from satpy.tests.utils import FakeReader - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} scene = satpy.scene.Scene(filenames=['bla'], @@ -846,16 +881,14 @@ def test_load_ds1_no_comps(self, cri): reader='fake_reader') scene.load(['ds1']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='ds1'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_dataid(name='ds1', resolution=250, calibration='reflectance', modifiers=tuple()) @mock.patch('satpy.scene.Scene.create_reader_instances') def test_load_ds1_load_twice(self, cri): """Test loading one dataset with no loaded compositors.""" import satpy.scene from satpy.tests.utils import FakeReader - from satpy import DatasetID r = FakeReader('fake_reader', 'fake_sensor') cri.return_value = {'fake_reader': r} scene = satpy.scene.Scene(filenames=['bla'], @@ -863,16 +896,17 @@ def test_load_ds1_load_twice(self, cri): reader='fake_reader') scene.load(['ds1']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='ds1'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_dataid(name='ds1', resolution=250, calibration='reflectance', modifiers=tuple()) with mock.patch.object(r, 'load') as m: scene.load(['ds1']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='ds1'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_dataid(name='ds1', + resolution=250, + calibration='reflectance', + modifiers=tuple()) self.assertFalse( m.called, "Reader.load was called again when loading something that's already loaded") @@ -882,7 +916,6 @@ def test_load_ds1_unknown_modifier(self, cri, cl): """Test loading one dataset with no loaded compositors.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -891,7 +924,7 @@ def test_load_ds1_unknown_modifier(self, cri, cl): base_dir='bli', reader='fake_reader') self.assertRaises(KeyError, scene.load, - [DatasetID(name='ds1', modifiers=('_fake_bad_mod_',))]) + [make_dataid(name='ds1', modifiers=('_fake_bad_mod_',))]) @mock.patch('satpy.composites.CompositorLoader.load_compositors') 
@mock.patch('satpy.scene.Scene.create_reader_instances') @@ -909,7 +942,7 @@ def test_load_ds4_cal(self, cri, cl): scene.load(['ds4']) loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 1) - self.assertEqual(loaded_ids[0].calibration, 'reflectance') + self.assertEqual(loaded_ids[0]['calibration'], 'reflectance') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -927,8 +960,8 @@ def test_load_ds5_best_resolution(self, cri, cl): scene.load(['ds5']) loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 1) - self.assertEqual(loaded_ids[0].name, 'ds5') - self.assertEqual(loaded_ids[0].resolution, 250) + self.assertEqual(loaded_ids[0]['name'], 'ds5') + self.assertEqual(loaded_ids[0]['resolution'], 250) @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -947,21 +980,57 @@ def test_load_ds5_multiple_resolution(self, cri, cl): scene.load(['ds5'], resolution=500) loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 2) - self.assertEqual(loaded_ids[0].name, 'ds5') - self.assertEqual(loaded_ids[0].resolution, 500) - self.assertEqual(loaded_ids[1].name, 'ds5') - self.assertEqual(loaded_ids[1].resolution, 1000) + self.assertEqual(loaded_ids[0]['name'], 'ds5') + self.assertEqual(loaded_ids[0]['resolution'], 500) + self.assertEqual(loaded_ids[1]['name'], 'ds5') + self.assertEqual(loaded_ids[1]['resolution'], 1000) + + @mock.patch('satpy.composites.CompositorLoader.load_compositors') + @mock.patch('satpy.scene.Scene.create_reader_instances') + def test_load_ds5_resolution_list(self, cri, cl): + """Test loading a dataset that has multiple resolutions when a list of resolutions is requested.""" + import satpy.scene + from satpy.tests.utils import FakeReader, test_composites + cri.return_value = {'fake_reader': FakeReader( + 'fake_reader', 'fake_sensor')} + comps, mods = test_composites('fake_sensor') + cl.return_value = (comps, mods) + scene = satpy.scene.Scene(filenames=['bla'], + base_dir='bli', + reader='fake_reader') + scene.load(['ds5'], resolution=[500, 1000]) + loaded_ids = list(scene.datasets.keys()) + assert len(loaded_ids) == 1 + assert loaded_ids[0]['name'] == 'ds5' + assert loaded_ids[0]['resolution'] == 500 + + @mock.patch('satpy.composites.CompositorLoader.load_compositors') + @mock.patch('satpy.scene.Scene.create_reader_instances') + def test_load_ds5_empty_modifiers(self, cri, cl): + """Test loading a dataset with an explicitly empty modifiers tuple.""" + import satpy.scene + from satpy.tests.utils import FakeReader, test_composites + cri.return_value = {'fake_reader': FakeReader( + 'fake_reader', 'fake_sensor')} + comps, mods = test_composites('fake_sensor') + cl.return_value = (comps, mods) + scene = satpy.scene.Scene(filenames=['bla'], + base_dir='bli', + reader='fake_reader') + scene.load([make_dsq(name='ds5', modifiers=tuple())]) + loaded_ids = list(scene.datasets.keys()) + assert len(loaded_ids) == 1 + assert loaded_ids[0]['name'] == 'ds5' @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') def test_load_ds5_missing_best_resolution(self, cri, cl): """Test loading a dataset that has multiple resolutions but the best isn't available.""" import satpy.scene - from satpy import DatasetID from satpy.tests.utils import FakeReader, test_composites # only the 500m is available - available_datasets =
[DatasetID('ds5', resolution=500)] + available_datasets = [make_dataid(name='ds5', resolution=500)] cri.return_value = { 'fake_reader': FakeReader( 'fake_reader', 'fake_sensor', datasets=['ds5'], @@ -975,8 +1044,8 @@ def test_load_ds5_missing_best_resolution(self, cri, cl): scene.load(['ds5']) loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 1) - self.assertEqual(loaded_ids[0].name, 'ds5') - self.assertEqual(loaded_ids[0].resolution, 500) + self.assertEqual(loaded_ids[0]['name'], 'ds5') + self.assertEqual(loaded_ids[0]['resolution'], 500) @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -994,7 +1063,7 @@ def test_load_ds6_wl(self, cri, cl): scene.load([0.22]) loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 1) - self.assertEqual(loaded_ids[0].name, 'ds6') + self.assertEqual(loaded_ids[0]['name'], 'ds6') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1019,7 +1088,6 @@ def test_load_comp1(self, cri, cl): """Test loading a composite with one required prereq.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1029,9 +1097,8 @@ def test_load_comp1(self, cri, cl): reader='fake_reader') scene.load(['comp1']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp1'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_cid(name='comp1') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1039,7 +1106,6 @@ def test_load_comp4(self, cri, cl): """Test loading a composite that depends on a composite.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1049,9 +1115,8 @@ def test_load_comp4(self, cri, cl): reader='fake_reader') scene.load(['comp4']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp4'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_cid(name='comp4') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1059,7 +1124,6 @@ def test_load_multiple_resolutions(self, cri, cl): """Test loading a dataset has multiple resolutions available with different resolutions.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1067,16 +1131,16 @@ def test_load_multiple_resolutions(self, cri, cl): scene = satpy.scene.Scene(filenames=['bla'], base_dir='bli', reader='fake_reader') - comp25 = DatasetID(name='comp25', resolution=1000) + comp25 = make_cid(name='comp25', resolution=1000) scene[comp25] = 'bla' scene.load(['comp25'], resolution=500) loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 2) - self.assertEqual(loaded_ids[0].name, 'comp25') - 
self.assertEqual(loaded_ids[0].resolution, 500) - self.assertEqual(loaded_ids[1].name, 'comp25') - self.assertEqual(loaded_ids[1].resolution, 1000) + self.assertEqual(loaded_ids[0]['name'], 'comp25') + self.assertEqual(loaded_ids[0]['resolution'], 500) + self.assertEqual(loaded_ids[1]['name'], 'comp25') + self.assertEqual(loaded_ids[1]['resolution'], 1000) @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1095,10 +1159,10 @@ def test_load_same_subcomposite(self, cri, cl): scene.load(['comp24', 'comp25'], resolution=500) loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 2) - self.assertEqual(loaded_ids[0].name, 'comp24') - self.assertEqual(loaded_ids[0].resolution, 500) - self.assertEqual(loaded_ids[1].name, 'comp25') - self.assertEqual(loaded_ids[1].resolution, 500) + self.assertEqual(loaded_ids[0]['name'], 'comp24') + self.assertEqual(loaded_ids[0]['resolution'], 500) + self.assertEqual(loaded_ids[1]['name'], 'comp25') + self.assertEqual(loaded_ids[1]['resolution'], 500) @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1106,7 +1170,6 @@ def test_load_comp5(self, cri, cl): """Test loading a composite that has an optional prerequisite.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1116,9 +1179,8 @@ def test_load_comp5(self, cri, cl): reader='fake_reader') scene.load(['comp5']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp5'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_cid(name='comp5') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1126,7 +1188,6 @@ def test_load_comp6(self, cri, cl): """Test loading a composite that has an optional composite prerequisite.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1137,8 +1198,7 @@ def test_load_comp6(self, cri, cl): scene.load(['comp6']) loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp6'))) + assert loaded_ids[0] == make_cid(name='comp6') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1161,7 +1221,6 @@ def test_load_comp9(self, cri, cl): """Test loading a composite that has a non-existent optional prereq.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1172,9 +1231,8 @@ def test_load_comp9(self, cri, cl): # it is fine that an optional prereq doesn't exist scene.load(['comp9']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp9'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_cid(name='comp9') 
@mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1182,7 +1240,6 @@ def test_load_comp10(self, cri, cl): """Test loading a composite that depends on a modified dataset.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1193,9 +1250,8 @@ def test_load_comp10(self, cri, cl): # it is fine that an optional prereq doesn't exist scene.load(['comp10']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp10'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_cid(name='comp10') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1203,7 +1259,6 @@ def test_load_comp11(self, cri, cl): """Test loading a composite that depends all wavelengths.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1214,9 +1269,8 @@ def test_load_comp11(self, cri, cl): # it is fine that an optional prereq doesn't exist scene.load(['comp11']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp11'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_cid(name='comp11') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1224,7 +1278,6 @@ def test_load_comp12(self, cri, cl): """Test loading a composite that depends all wavelengths that get modified.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1235,9 +1288,8 @@ def test_load_comp12(self, cri, cl): # it is fine that an optional prereq doesn't exist scene.load(['comp12']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp12'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_cid(name='comp12') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1245,7 +1297,6 @@ def test_load_comp13(self, cri, cl): """Test loading a composite that depends on a modified dataset where the resolution changes.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1256,14 +1307,13 @@ def test_load_comp13(self, cri, cl): # it is fine that an optional prereq doesn't exist scene.load(['comp13']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp13'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_cid(name='comp13') @mock.patch('satpy.composites.CompositorLoader.load_compositors') 
@mock.patch('satpy.scene.Scene.create_reader_instances') def test_load_comp14(self, cri, cl): - """Test loading a composite that updates the DatasetID during generation.""" + """Test loading a composite that updates the DataID during generation.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites cri.return_value = {'fake_reader': FakeReader( @@ -1276,8 +1326,8 @@ def test_load_comp14(self, cri, cl): # it is fine that an optional prereq doesn't exist scene.load(['comp14']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertEqual(loaded_ids[0].name, 'comp14') + assert len(loaded_ids) == 1 + assert loaded_ids[0]['name'] == 'comp14' @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1299,7 +1349,7 @@ def test_load_comp15(self, cri, cl): # it is fine that an optional prereq doesn't exist scene.load(['comp15']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 0) + assert len(loaded_ids) == 0 @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1321,8 +1371,8 @@ def test_load_comp16(self, cri, cl): # it is fine that an optional prereq doesn't exist scene.load(['comp16']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertEqual(loaded_ids[0].name, 'comp16') + assert len(loaded_ids) == 1 + assert loaded_ids[0]['name'] == 'comp16' @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1340,7 +1390,7 @@ def test_load_comp17(self, cri, cl): # it is fine that an optional prereq doesn't exist scene.load(['comp17']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 0) + assert len(loaded_ids) == 0 @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1348,7 +1398,6 @@ def test_load_comp18(self, cri, cl): """Test loading a composite that depends on a incompatible area modified dataset.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1367,11 +1416,11 @@ def test_load_comp18(self, cri, cl): # for the incomp_areas modifier self.assertEqual(len(loaded_ids), 4) # the 1 dependencies self.assertIn('ds3', scene.datasets) - self.assertIn(DatasetID(name='ds4', calibration='reflectance', - modifiers=('mod1', 'mod3')), + self.assertIn(make_dataid(name='ds4', calibration='reflectance', + modifiers=('mod1', 'mod3')), scene.datasets) - self.assertIn(DatasetID(name='ds5', resolution=250, - modifiers=('mod1',)), + self.assertIn(make_dataid(name='ds5', resolution=250, + modifiers=('mod1',)), scene.datasets) @mock.patch('satpy.composites.CompositorLoader.load_compositors') @@ -1385,7 +1434,6 @@ def test_load_comp18_2(self, cri, cl): """ import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1405,11 +1453,11 @@ def test_load_comp18_2(self, cri, cl): self.assertEqual(len(loaded_ids), 5) # the 1 dependencies self.assertIn('ds3', scene.datasets) self.assertIn('ds2', scene.datasets) - 
self.assertIn(DatasetID(name='ds4', calibration='reflectance', - modifiers=('mod1', 'mod3')), + self.assertIn(make_dataid(name='ds4', calibration='reflectance', + modifiers=('mod1', 'mod3')), scene.datasets) - self.assertIn(DatasetID(name='ds5', resolution=250, - modifiers=('mod1',)), + self.assertIn(make_dataid(name='ds5', resolution=250, + modifiers=('mod1',)), scene.datasets) @mock.patch('satpy.composites.CompositorLoader.load_compositors') @@ -1425,7 +1473,6 @@ def test_load_comp19(self, cri, cl): """ import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1438,7 +1485,7 @@ def test_load_comp19(self, cri, cl): # initialize the dep tree without loading the data scene.dep_tree.find_dependencies({'comp19'}) this_node = scene.dep_tree['comp19'] - shared_dep_id = DatasetID(name='ds5', modifiers=('res_change',)) + shared_dep_id = make_dataid(name='ds5', modifiers=('res_change',)) shared_dep_expected_node = scene.dep_tree[shared_dep_id] # get the node for the first dep in the prereqs list of the # comp13 node @@ -1451,9 +1498,8 @@ def test_load_comp19(self, cri, cl): scene.load(['comp19']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp19'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_cid(name='comp19') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1471,7 +1517,7 @@ def test_load_multiple_comps(self, cri, cl): scene.load(['comp1', 'comp2', 'comp3', 'comp4', 'comp5', 'comp6', 'comp7', 'comp9', 'comp10']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 9) + assert len(loaded_ids) == 9 @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1496,7 +1542,7 @@ def test_load_multiple_comps_separate(self, cri, cl): scene.load(['comp2']) scene.load(['comp1']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 9) + assert len(loaded_ids) == 9 @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1504,7 +1550,6 @@ def test_load_modified(self, cri, cl): """Test loading a modified dataset.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1512,10 +1557,10 @@ def test_load_modified(self, cri, cl): scene = satpy.scene.Scene(filenames=['bla'], base_dir='bli', reader='fake_reader') - scene.load([DatasetID(name='ds1', modifiers=('mod1', 'mod2'))]) + scene.load([make_dsq(name='ds1', modifiers=('mod1', 'mod2'))]) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual(loaded_ids[0].modifiers, ('mod1', 'mod2')) + assert len(loaded_ids) == 1 + assert loaded_ids[0]['modifiers'] == ('mod1', 'mod2') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1523,7 +1568,6 @@ def test_load_multiple_modified(self, cri, cl): """Test loading multiple modified datasets.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - 
from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1532,17 +1576,17 @@ def test_load_multiple_modified(self, cri, cl): base_dir='bli', reader='fake_reader') scene.load([ - DatasetID(name='ds1', modifiers=('mod1', 'mod2')), - DatasetID(name='ds2', modifiers=('mod2', 'mod1')), + make_dataid(name='ds1', modifiers=('mod1', 'mod2')), + make_dataid(name='ds2', modifiers=('mod2', 'mod1')), ]) loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 2) for i in loaded_ids: - if i.name == 'ds1': - self.assertTupleEqual(i.modifiers, ('mod1', 'mod2')) + if i['name'] == 'ds1': + self.assertTupleEqual(i['modifiers'], ('mod1', 'mod2')) else: - self.assertEqual(i.name, 'ds2') - self.assertTupleEqual(i.modifiers, ('mod2', 'mod1')) + self.assertEqual(i['name'], 'ds2') + self.assertTupleEqual(i['modifiers'], ('mod2', 'mod1')) @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1576,7 +1620,6 @@ def test_load_dataset_after_composite2(self, cri, cl): """Test load complex composite followed by other datasets.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID r = FakeReader('fake_reader', 'fake_sensor') cri.return_value = {'fake_reader': r} comps, mods = test_composites('fake_sensor') @@ -1594,7 +1637,8 @@ def test_load_dataset_after_composite2(self, cri, cl): loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 2) # this is the unmodified ds1 - self.assertIn(DatasetID(name='ds1'), loaded_ids) + self.assertIn(make_dataid(name='ds1', resolution=250, calibration='reflectance', modifiers=tuple()), + loaded_ids) # m.assert_called_once_with(set([scene.dep_tree['ds1']])) m.assert_called_once_with(set()) with mock.patch.object(scene, '_read_composites', wraps=scene._read_composites) as m: @@ -1603,7 +1647,8 @@ def test_load_dataset_after_composite2(self, cri, cl): loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 2) # this is the unmodified ds1 - self.assertIn(DatasetID(name='ds1'), loaded_ids) + self.assertIn(make_dataid(name='ds1', resolution=250, calibration='reflectance', modifiers=tuple()), + loaded_ids) m.assert_called_once_with(set()) # we should only generate the composite once self.assertEqual(comps['fake_sensor'][ @@ -1620,7 +1665,6 @@ def test_load_comp20(self, cri, cl): """Test loading composite with optional modifier dependencies.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1631,9 +1675,8 @@ def test_load_comp20(self, cri, cl): # it is fine that an optional prereq doesn't exist scene.load(['comp20']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp20'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_cid(name='comp20') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1641,7 +1684,6 @@ def test_load_comp21(self, cri, cl): """Test loading composite with bad optional modifier dependencies.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = 
{'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1652,9 +1694,8 @@ def test_load_comp21(self, cri, cl): # it is fine that an optional prereq doesn't exist scene.load(['comp21']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp21'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_cid(name='comp21') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1662,7 +1703,6 @@ def test_load_comp22(self, cri, cl): """Test loading composite with only optional modifier dependencies.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1673,9 +1713,8 @@ def test_load_comp22(self, cri, cl): # it is fine that an optional prereq doesn't exist scene.load(['comp22']) loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 1) - self.assertTupleEqual( - tuple(loaded_ids[0]), tuple(DatasetID(name='comp22'))) + assert len(loaded_ids) == 1 + assert loaded_ids[0] == make_cid(name='comp22') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1692,14 +1731,14 @@ def test_no_generate_comp10(self, cri, cl): reader='fake_reader') # it is fine that an optional prereq doesn't exist scene.load(['comp10'], generate=False) - self.assertTrue(any(ds_id == 'comp10' for ds_id in scene.wishlist)) + self.assertTrue(any(ds_id['name'] == 'comp10' for ds_id in scene.wishlist)) self.assertNotIn('comp10', scene.datasets) # two dependencies should have been loaded self.assertEqual(len(scene.datasets), 2) self.assertEqual(len(scene.missing_datasets), 1) scene.generate_composites() - self.assertTrue(any(ds_id == 'comp10' for ds_id in scene.wishlist)) + self.assertTrue(any(ds_id['name'] == 'comp10' for ds_id in scene.wishlist)) self.assertIn('comp10', scene.datasets) self.assertEqual(len(scene.missing_datasets), 0) @@ -1710,12 +1749,12 @@ def test_modified_with_wl_dep(self, cri, cl): More importantly test that loading the modifiers dependency at the same time as the original modified dataset that the dependency tree - nodes are unique and that DatasetIDs. + nodes are unique and that DataIDs. 
""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID + from satpy.dataset import WavelengthRange cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1726,19 +1765,18 @@ def test_modified_with_wl_dep(self, cri, cl): # Check dependency tree nodes # initialize the dep tree without loading the data - ds1_mod_id = DatasetID(name='ds1', modifiers=('mod_wl',)) - ds3_mod_id = DatasetID(name='ds3', modifiers=('mod_wl',)) + ds1_mod_id = make_dsq(name='ds1', modifiers=('mod_wl',)) + ds3_mod_id = make_dsq(name='ds3', modifiers=('mod_wl',)) scene.dep_tree.find_dependencies({ds1_mod_id, ds3_mod_id}) ds1_mod_node = scene.dep_tree[ds1_mod_id] ds3_mod_node = scene.dep_tree[ds3_mod_id] ds1_mod_dep_node = ds1_mod_node.data[1][1] ds3_mod_dep_node = ds3_mod_node.data[1][1] # mod_wl depends on the this node: - ds6_modded_node = scene.dep_tree[DatasetID(name='ds6', modifiers=('mod1',))] + ds6_modded_node = scene.dep_tree[make_dataid(name='ds6', modifiers=('mod1',))] # this dep should be full qualified with name and wavelength - self.assertIsNotNone(ds6_modded_node.name.name) - self.assertIsNotNone(ds6_modded_node.name.wavelength) - self.assertEqual(len(ds6_modded_node.name.wavelength), 3) + self.assertIsNotNone(ds6_modded_node.name['name']) + assert isinstance(ds6_modded_node.name['wavelength'], WavelengthRange) # the node should be shared between everything that uses it self.assertIs(ds1_mod_dep_node, ds3_mod_dep_node) self.assertIs(ds1_mod_dep_node, ds6_modded_node) @@ -1757,7 +1795,7 @@ def test_load_comp11_and_23(self, cri, cl): """Test loading two composites that depend on similar wavelengths.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID, DatasetDict + from satpy import DatasetDict cri.return_value = {'fake_reader': FakeReader( 'fake_reader', 'fake_sensor')} comps, mods = test_composites('fake_sensor') @@ -1780,14 +1818,14 @@ def _test(self, sensor_names): reader='fake_reader') # mock the available comps/mods in the compositor loader avail_comps = scene.available_composite_ids() - self.assertIn(DatasetID(name='comp11'), avail_comps) - self.assertIn(DatasetID(name='comp23'), avail_comps) + self.assertIn(make_cid(name='comp11'), avail_comps) + self.assertIn(make_cid(name='comp23'), avail_comps) # it is fine that an optional prereq doesn't exist scene.load(['comp11', 'comp23']) comp11_node = scene.dep_tree['comp11'] comp23_node = scene.dep_tree['comp23'] - self.assertEqual(comp11_node.data[1][-1].name.name, 'ds10') - self.assertEqual(comp23_node.data[1][0].name.name, 'ds8') + self.assertEqual(comp11_node.data[1][-1].name['name'], 'ds10') + self.assertEqual(comp23_node.data[1][0].name['name'], 'ds8') loaded_ids = list(scene.datasets.keys()) self.assertEqual(len(loaded_ids), 2) self.assertIn('comp11', scene.datasets) @@ -1799,9 +1837,8 @@ def test_load_too_many(self, cri, cl): """Test dependency tree if too many reader keys match.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID - datasets = [DatasetID(name='duplicate1', wavelength=(0.1, 0.2, 0.3)), - DatasetID(name='duplicate2', wavelength=(0.1, 0.2, 0.3))] + datasets = [make_dataid(name='duplicate1', wavelength=(0.1, 0.2, 0.3)), + make_dataid(name='duplicate2', wavelength=(0.1, 0.2, 0.3))] reader = FakeReader('fake_reader', 'fake_sensor', datasets=datasets, filter_datasets=False) cri.return_value = 
{'fake_reader': reader} @@ -1821,7 +1858,6 @@ def test_available_comps_no_deps(self, cri, cl): from satpy.tests.utils import FakeReader, test_composites import satpy.scene from satpy.readers import DatasetDict - from satpy import DatasetID def _test(self, sensor_names): if not self.compositors: @@ -1843,9 +1879,9 @@ def _test(self, sensor_names): comps, mods = test_composites('fake_sensor') scene = satpy.scene.Scene(filenames=['bla'], base_dir='bli', reader='fake_reader') all_comp_ids = scene.available_composite_ids() - self.assertIn(DatasetID(name='static_image'), all_comp_ids) + self.assertIn(make_cid(name='static_image'), all_comp_ids) available_comp_ids = scene.available_composite_ids() - self.assertIn(DatasetID(name='static_image'), available_comp_ids) + self.assertIn(make_cid(name='static_image'), available_comp_ids) @mock.patch('satpy.composites.CompositorLoader.load_compositors') @mock.patch('satpy.scene.Scene.create_reader_instances') @@ -1890,7 +1926,6 @@ def test_resample_scene_copy(self, cri, cl, rs): """ import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID from pyresample.geometry import AreaDefinition from pyresample.utils import proj4_str_to_dict cri.return_value = {'fake_reader': FakeReader( @@ -1915,7 +1950,7 @@ def test_resample_scene_copy(self, cri, cl, rs): scene.load(['ds1']) comp19_node = scene.dep_tree['comp19'] - ds5_mod_id = DatasetID(name='ds5', modifiers=('res_change',)) + ds5_mod_id = make_dataid(name='ds5', modifiers=('res_change',)) ds5_node = scene.dep_tree[ds5_mod_id] comp13_node = scene.dep_tree['comp13'] @@ -1924,14 +1959,14 @@ def test_resample_scene_copy(self, cri, cl, rs): self.assertRaises(KeyError, scene.dep_tree.__getitem__, 'new_ds') loaded_ids = list(scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 2) - self.assertTupleEqual(tuple(loaded_ids[0]), tuple(DatasetID(name='comp19'))) - self.assertTupleEqual(tuple(loaded_ids[1]), tuple(DatasetID(name='ds1'))) + assert len(loaded_ids) == 2 + assert loaded_ids[0] == make_cid(name='comp19') + assert loaded_ids[1] == make_dataid(name='ds1', resolution=250, calibration='reflectance', modifiers=tuple()) loaded_ids = list(new_scene.datasets.keys()) - self.assertEqual(len(loaded_ids), 2) - self.assertTupleEqual(tuple(loaded_ids[0]), tuple(DatasetID(name='comp19'))) - self.assertTupleEqual(tuple(loaded_ids[1]), tuple(DatasetID(name='new_ds'))) + assert len(loaded_ids) == 2 + assert loaded_ids[0] == make_cid(name='comp19') + assert loaded_ids[1] == make_cid(name='new_ds') @mock.patch('satpy.scene.resample_dataset') @mock.patch('satpy.composites.CompositorLoader.load_compositors') @@ -1940,7 +1975,6 @@ def test_resample_reduce_data_toggle(self, cri, cl, rs): """Test that the Scene can be reduced or not reduced during resampling.""" import satpy.scene from satpy.tests.utils import FakeReader, test_composites - from satpy import DatasetID from pyresample.geometry import AreaDefinition from pyresample.utils import proj4_str_to_dict import dask.array as da @@ -1977,9 +2011,9 @@ def test_resample_reduce_data_toggle(self, cri, cl, rs): # we force the below order of processing to test that success isn't # based on data of the same resolution being processed together test_order = [ - DatasetID.from_dict(scene['comp19'].attrs), - DatasetID.from_dict(scene['comp19_big'].attrs), - DatasetID.from_dict(scene['comp19_copy'].attrs), + make_cid(**scene['comp19'].attrs), + make_cid(**scene['comp19_big'].attrs), + make_cid(**scene['comp19_copy'].attrs), ] with 
mock.patch('satpy.scene.Scene._slice_data') as slice_data, \ mock.patch('satpy.dataset.dataset_walker') as ds_walker: @@ -2100,7 +2134,7 @@ def test_no_generate_comp10(self, cri, cl, rs): # it is fine that an optional prereq doesn't exist scene.load(['comp10'], generate=False) - self.assertTrue(any(ds_id == 'comp10' for ds_id in scene.wishlist)) + self.assertTrue(any(ds_id['name'] == 'comp10' for ds_id in scene.wishlist)) self.assertNotIn('comp10', scene.datasets) # two dependencies should have been loaded self.assertEqual(len(scene.datasets), 2) @@ -2113,13 +2147,13 @@ def test_no_generate_comp10(self, cri, cl, rs): self.assertEqual(len(scene.missing_datasets), 1) new_scn.generate_composites() - self.assertTrue(any(ds_id == 'comp10' for ds_id in new_scn.wishlist)) + self.assertTrue(any(ds_id['name'] == 'comp10' for ds_id in new_scn.wishlist)) self.assertIn('comp10', new_scn.datasets) self.assertEqual(len(new_scn.missing_datasets), 0) # try generating them right away new_scn = scene.resample(area_def) - self.assertTrue(any(ds_id == 'comp10' for ds_id in new_scn.wishlist)) + self.assertTrue(any(ds_id['name'] == 'comp10' for ds_id in new_scn.wishlist)) self.assertIn('comp10', new_scn.datasets) self.assertEqual(len(new_scn.missing_datasets), 0) @@ -2203,9 +2237,9 @@ def test_save_datasets_bad_writer(self): def test_save_datasets_missing_wishlist(self): """Calling 'save_datasets' with no valid datasets.""" - from satpy.scene import Scene, DatasetID + from satpy.scene import Scene scn = Scene() - scn.wishlist.add(DatasetID(name='true_color')) + scn.wishlist.add(make_cid(name='true_color')) self.assertRaises(RuntimeError, scn.save_datasets, writer='geotiff', diff --git a/satpy/tests/test_utils.py b/satpy/tests/test_utils.py index 70fe3fce06..f0435ce904 100644 --- a/satpy/tests/test_utils.py +++ b/satpy/tests/test_utils.py @@ -266,7 +266,7 @@ def test_make_fake_scene(): sc = make_fake_scene({ "six": np.arange(25).reshape(5, 5)}) assert len(sc.keys()) == 1 - assert sc.keys().pop().name == "six" + assert sc.keys().pop()['name'] == "six" assert sc["six"].attrs["area"].shape == (5, 5) sc = make_fake_scene({ "seven": np.arange(3*7).reshape(3, 7), diff --git a/satpy/tests/test_yaml_reader.py b/satpy/tests/test_yaml_reader.py index 3bd20df979..4a8f2fc74e 100644 --- a/satpy/tests/test_yaml_reader.py +++ b/satpy/tests/test_yaml_reader.py @@ -26,7 +26,8 @@ import satpy.readers.yaml_reader as yr from satpy.readers.file_handlers import BaseFileHandler -from satpy.dataset import DatasetID +from satpy.dataset import DataQuery +from satpy.tests.utils import make_dataid class FakeFH(BaseFileHandler): @@ -245,33 +246,15 @@ def setUp(self, _, rec_up): # pylint: disable=arguments-differ 'end_time': datetime(2000, 1, 2), }) - def test_all_dataset_ids(self): + def test_all_data_ids(self): """Check that all datasets ids are returned.""" - self.assertSetEqual(set(self.reader.all_dataset_ids), - {DatasetID(name='ch02', - wavelength=(0.7, 0.75, 0.8), - resolution=None, - polarization=None, - calibration='counts', - modifiers=()), - DatasetID(name='ch01', - wavelength=(0.5, 0.6, 0.7), - resolution=None, - polarization=None, - calibration='reflectance', - modifiers=()), - DatasetID(name='lons', - wavelength=None, - resolution=None, - polarization=None, - calibration=None, - modifiers=()), - DatasetID(name='lats', - wavelength=None, - resolution=None, - polarization=None, - calibration=None, - modifiers=())}) + for dataid in self.reader.all_dataset_ids: + name = dataid['name'].replace('0', '') + assert 
self.config['datasets'][name]['name'] == dataid['name'] + if 'wavelength' in self.config['datasets'][name]: + assert self.config['datasets'][name]['wavelength'] == list(dataid['wavelength'])[:3] + if 'calibration' in self.config['datasets'][name]: + assert self.config['datasets'][name]['calibration'] == dataid['calibration'] def test_all_dataset_names(self): """Get all dataset names.""" @@ -283,18 +266,14 @@ def test_available_dataset_ids(self): loadables = self.reader.select_files_from_pathnames(['a001.bla']) self.reader.create_filehandlers(loadables) self.assertSetEqual(set(self.reader.available_dataset_ids), - {DatasetID(name='ch02', - wavelength=(0.7, 0.75, 0.8), - resolution=None, - polarization=None, - calibration='counts', - modifiers=()), - DatasetID(name='ch01', - wavelength=(0.5, 0.6, 0.7), - resolution=None, - polarization=None, - calibration='reflectance', - modifiers=())}) + {make_dataid(name='ch02', + wavelength=(0.7, 0.75, 0.8), + calibration='counts', + modifiers=()), + make_dataid(name='ch01', + wavelength=(0.5, 0.6, 0.7), + calibration='reflectance', + modifiers=())}) def test_available_dataset_names(self): """Get ids of the available datasets.""" @@ -433,12 +412,12 @@ def test_supports_sensor(self): @patch('satpy.readers.yaml_reader.StackedAreaDefinition') def test_load_area_def(self, sad): """Test loading the area def for the reader.""" - dsid = MagicMock() + dataid = MagicMock() file_handlers = [] items = random.randrange(2, 10) for _i in range(items): file_handlers.append(MagicMock()) - final_area = self.reader._load_area_def(dsid, file_handlers) + final_area = self.reader._load_area_def(dataid, file_handlers) self.assertEqual(final_area, sad.return_value.squeeze.return_value) args, kwargs = sad.call_args @@ -453,49 +432,28 @@ def test_preferred_filetype(self): def test_get_coordinates_for_dataset_key(self): """Test getting coordinates for a key.""" - ds_id = DatasetID(name='ch01', wavelength=(0.5, 0.6, 0.7), - resolution=None, polarization=None, - calibration='reflectance', modifiers=()) - res = self.reader._get_coordinates_for_dataset_key(ds_id) + ds_q = DataQuery(name='ch01', wavelength=(0.5, 0.6, 0.7, 'µm'), + calibration='reflectance', modifiers=()) + res = self.reader._get_coordinates_for_dataset_key(ds_q) self.assertListEqual(res, - [DatasetID(name='lons', - wavelength=None, - resolution=None, - polarization=None, - calibration=None, - modifiers=()), - DatasetID(name='lats', - wavelength=None, - resolution=None, - polarization=None, - calibration=None, - modifiers=())]) + [make_dataid(name='lons'), + make_dataid(name='lats')]) def test_get_coordinates_for_dataset_key_without(self): """Test getting coordinates for a key without coordinates.""" - ds_id = DatasetID(name='lons', - wavelength=None, - resolution=None, - polarization=None, - calibration=None, - modifiers=()) + ds_id = make_dataid(name='lons', + modifiers=()) res = self.reader._get_coordinates_for_dataset_key(ds_id) self.assertListEqual(res, []) def test_get_coordinates_for_dataset_keys(self): """Test getting coordinates for keys.""" - ds_id1 = DatasetID(name='ch01', wavelength=(0.5, 0.6, 0.7), - resolution=None, polarization=None, - calibration='reflectance', modifiers=()) - ds_id2 = DatasetID(name='ch02', wavelength=(0.7, 0.75, 0.8), - resolution=None, polarization=None, - calibration='counts', modifiers=()) - lons = DatasetID(name='lons', wavelength=None, - resolution=None, polarization=None, - calibration=None, modifiers=()) - lats = DatasetID(name='lats', wavelength=None, - resolution=None, 
polarization=None, - calibration=None, modifiers=()) + ds_id1 = make_dataid(name='ch01', wavelength=(0.5, 0.6, 0.7), + calibration='reflectance', modifiers=()) + ds_id2 = make_dataid(name='ch02', wavelength=(0.7, 0.75, 0.8), + calibration='counts', modifiers=()) + lons = make_dataid(name='lons', modifiers=()) + lats = make_dataid(name='lats', modifiers=()) res = self.reader._get_coordinates_for_dataset_keys([ds_id1, ds_id2, lons]) @@ -505,16 +463,13 @@ def test_get_coordinates_for_dataset_keys(self): def test_get_file_handlers(self): """Test getting filehandler to load a dataset.""" - ds_id1 = DatasetID(name='ch01', wavelength=(0.5, 0.6, 0.7), - resolution=None, polarization=None, - calibration='reflectance', modifiers=()) + ds_id1 = make_dataid(name='ch01', wavelength=(0.5, 0.6, 0.7), + calibration='reflectance', modifiers=()) self.reader.file_handlers = {'ftype1': 'bla'} self.assertEqual(self.reader._get_file_handlers(ds_id1), 'bla') - lons = DatasetID(name='lons', wavelength=None, - resolution=None, polarization=None, - calibration=None, modifiers=()) + lons = make_dataid(name='lons', modifiers=()) self.assertEqual(self.reader._get_file_handlers(lons), None) @patch('satpy.readers.yaml_reader.xr') @@ -626,8 +581,8 @@ def file_type_matches(self, ds_ftype): file_types = ds_info['file_type'] if not isinstance(file_types, list): file_types = [file_types] - expected = resol if ftype in file_types else None - self.assertEqual(expected, ds_id.resolution) + if ftype in file_types: + self.assertEqual(resol, ds_id['resolution']) class TestGEOFlippableFileYAMLReader(unittest.TestCase): @@ -866,12 +821,12 @@ def test_load_dataset(self, mss, xr, parent_load_dataset): xr.full_like.return_value = empty_segment concat_slices = MagicMock() xr.concat.return_value = concat_slices - dsid = MagicMock() + dataid = MagicMock() ds_info = MagicMock() file_handlers = MagicMock() # No missing segments - res = self.reader._load_dataset(dsid, ds_info, file_handlers) + res = self.reader._load_dataset(dataid, ds_info, file_handlers) self.assertTrue(res.attrs is file_handlers[0].combine_info.return_value) self.assertTrue(empty_segment not in slice_list) @@ -880,7 +835,7 @@ def test_load_dataset(self, mss, xr, parent_load_dataset): counter = 8 mss.return_value = (counter, expected_segments, slice_list, failure, projectable) - res = self.reader._load_dataset(dsid, ds_info, file_handlers) + res = self.reader._load_dataset(dataid, ds_info, file_handlers) self.assertTrue(slice_list[4] is empty_segment) # The last segment is missing @@ -889,7 +844,7 @@ def test_load_dataset(self, mss, xr, parent_load_dataset): counter = 8 mss.return_value = (counter, expected_segments, slice_list, failure, projectable) - res = self.reader._load_dataset(dsid, ds_info, file_handlers) + res = self.reader._load_dataset(dataid, ds_info, file_handlers) self.assertTrue(slice_list[-1] is empty_segment) # The last two segments are missing @@ -898,7 +853,7 @@ def test_load_dataset(self, mss, xr, parent_load_dataset): counter = 7 mss.return_value = (counter, expected_segments, slice_list, failure, projectable) - res = self.reader._load_dataset(dsid, ds_info, file_handlers) + res = self.reader._load_dataset(dataid, ds_info, file_handlers) self.assertTrue(slice_list[-1] is empty_segment) self.assertTrue(slice_list[-2] is empty_segment) @@ -908,7 +863,7 @@ def test_load_dataset(self, mss, xr, parent_load_dataset): counter = 9 mss.return_value = (counter, expected_segments, slice_list, failure, projectable) - res = self.reader._load_dataset(dsid, ds_info, 
file_handlers) + res = self.reader._load_dataset(dataid, ds_info, file_handlers) self.assertTrue(slice_list[0] is empty_segment) # The first two segments are missing @@ -918,14 +873,14 @@ def test_load_dataset(self, mss, xr, parent_load_dataset): counter = 9 mss.return_value = (counter, expected_segments, slice_list, failure, projectable) - res = self.reader._load_dataset(dsid, ds_info, file_handlers) + res = self.reader._load_dataset(dataid, ds_info, file_handlers) self.assertTrue(slice_list[0] is empty_segment) self.assertTrue(slice_list[1] is empty_segment) # Disable padding - res = self.reader._load_dataset(dsid, ds_info, file_handlers, + res = self.reader._load_dataset(dataid, ds_info, file_handlers, pad_data=False) - parent_load_dataset.assert_called_once_with(dsid, ds_info, + parent_load_dataset.assert_called_once_with(dataid, ds_info, file_handlers) @patch('satpy.readers.yaml_reader._load_area_def') @@ -934,16 +889,16 @@ def test_load_dataset(self, mss, xr, parent_load_dataset): @patch('satpy.readers.yaml_reader._pad_later_segments_area') def test_load_area_def(self, pesa, plsa, sad, parent_load_area_def): """Test _load_area_def().""" - dsid = MagicMock() + dataid = MagicMock() file_handlers = MagicMock() - self.reader._load_area_def(dsid, file_handlers) + self.reader._load_area_def(dataid, file_handlers) pesa.assert_called_once() plsa.assert_called_once() sad.assert_called_once() parent_load_area_def.assert_not_called() # Disable padding - self.reader._load_area_def(dsid, file_handlers, pad_data=False) - parent_load_area_def.assert_called_once_with(dsid, file_handlers) + self.reader._load_area_def(dataid, file_handlers, pad_data=False) + parent_load_area_def.assert_called_once_with(dataid, file_handlers) @patch('satpy.readers.yaml_reader.AreaDefinition') def test_pad_later_segments_area(self, AreaDefinition): @@ -963,8 +918,8 @@ def test_pad_later_segments_area(self, AreaDefinition): fh_1.filename_info = filename_info fh_1.get_area_def = get_area_def file_handlers = [fh_1] - dsid = 'dsid' - res = plsa(file_handlers, dsid) + dataid = 'dataid' + res = plsa(file_handlers, dataid) self.assertEqual(len(res), 2) seg2_extent = (0, 1500, 200, 1000) expected_call = ('fill', 'fill', 'fill', 'proj_dict', 500, 200, @@ -989,9 +944,9 @@ def test_pad_earlier_segments_area(self, AreaDefinition): fh_2.filename_info = filename_info fh_2.get_area_def = get_area_def file_handlers = [fh_2] - dsid = 'dsid' + dataid = 'dataid' area_defs = {2: seg2_area} - res = pesa(file_handlers, dsid, area_defs) + res = pesa(file_handlers, dataid, area_defs) self.assertEqual(len(res), 2) seg1_extent = (0, 500, 200, 0) expected_call = ('fill', 'fill', 'fill', 'proj_dict', 500, 200, @@ -1010,8 +965,8 @@ def test_find_missing_segments(self): fh_seg1.get_dataset = get_dataset file_handlers = [fh_seg1] ds_info = {'file_type': []} - dsid = 'dsid' - res = fms(file_handlers, ds_info, dsid) + dataid = 'dataid' + res = fms(file_handlers, ds_info, dataid) counter, expected_segments, slice_list, failure, proj = res self.assertEqual(counter, 2) self.assertEqual(expected_segments, 1) @@ -1031,8 +986,8 @@ def test_find_missing_segments(self): fh_seg2.get_dataset = get_dataset file_handlers = [fh_seg2] ds_info = {'file_type': ['foo']} - dsid = 'dsid' - res = fms(file_handlers, ds_info, dsid) + dataid = 'dataid' + res = fms(file_handlers, ds_info, dataid) counter, expected_segments, slice_list, failure, proj = res self.assertEqual(counter, 3) self.assertEqual(expected_segments, 3) diff --git a/satpy/tests/utils.py 
b/satpy/tests/utils.py index ff387b563c..f9b2dfee18 100644 --- a/satpy/tests/utils.py +++ b/satpy/tests/utils.py @@ -17,9 +17,27 @@ """Utilities for various satpy tests.""" from datetime import datetime -from satpy.readers.yaml_reader import FileYAMLReader from unittest import mock +from satpy.dataset import (DataID, DataQuery, default_id_keys_config, + minimal_default_keys_config) +from satpy.readers.yaml_reader import FileYAMLReader + + +def make_dataid(**items): + """Make a DataID with default keys.""" + return DataID(default_id_keys_config, **items) + + +def make_cid(**items): + """Make a DataID with a minimal set of keys to id composites.""" + return DataID(minimal_default_keys_config, **items) + + +def make_dsq(**items): + """Make a dataset query.""" + return DataQuery(**items) + def spy_decorator(method_to_decorate): """Fancy decorate to wrap an object while still calling it. @@ -87,25 +105,24 @@ def convert_file_content_to_data_array(file_content, attrs=tuple(), def test_datasets(): """Get list of various test datasets.""" - from satpy import DatasetID d = [ - DatasetID(name='ds1'), - DatasetID(name='ds2'), - DatasetID(name='ds3'), - DatasetID(name='ds4', calibration='reflectance'), - DatasetID(name='ds4', calibration='radiance'), - DatasetID(name='ds5', resolution=250), - DatasetID(name='ds5', resolution=500), - DatasetID(name='ds5', resolution=1000), - DatasetID(name='ds6', wavelength=(0.1, 0.2, 0.3)), - DatasetID(name='ds7', wavelength=(0.4, 0.5, 0.6)), - DatasetID(name='ds8', wavelength=(0.7, 0.8, 0.9)), - DatasetID(name='ds9_fail_load', wavelength=(1.0, 1.1, 1.2)), - DatasetID(name='ds10', wavelength=(0.75, 0.85, 0.95)), - DatasetID(name='ds11', resolution=500), - DatasetID(name='ds11', resolution=1000), - DatasetID(name='ds12', resolution=500), - DatasetID(name='ds12', resolution=1000), + make_dataid(name='ds1', resolution=250, calibration='reflectance'), + make_dataid(name='ds2', resolution=250, calibration='reflectance'), + make_dataid(name='ds3'), + make_dataid(name='ds4', calibration='reflectance'), + make_dataid(name='ds4', calibration='radiance'), + make_dataid(name='ds5', resolution=250), + make_dataid(name='ds5', resolution=500), + make_dataid(name='ds5', resolution=1000), + make_dataid(name='ds6', wavelength=(0.1, 0.2, 0.3), resolution=250), + make_dataid(name='ds7', wavelength=(0.4, 0.5, 0.6)), + make_dataid(name='ds8', wavelength=(0.7, 0.8, 0.9)), + make_dataid(name='ds9_fail_load', wavelength=(1.0, 1.1, 1.2)), + make_dataid(name='ds10', wavelength=(0.75, 0.85, 0.95)), + make_dataid(name='ds11', resolution=500), + make_dataid(name='ds11', resolution=1000), + make_dataid(name='ds12', resolution=500), + make_dataid(name='ds12', resolution=1000), ] return d @@ -125,10 +142,10 @@ def _create_fake_compositor(ds_id, prereqs, opt_prereqs): se = mock.MagicMock() def _se(datasets, optional_datasets=None, ds_id=ds_id, **kwargs): - if ds_id.name == 'comp14': + if ds_id['name'] == 'comp14': # used as a test when composites update the dataset id with # information from prereqs - ds_id = ds_id._replace(resolution=555) + ds_id = DataID(ds_id.id_keys, resolution=555, **ds_id) if len(datasets) != len(prereqs): raise ValueError("Not enough prerequisite datasets passed") return DataArray(data=np.arange(75).reshape(5, 5, 3), @@ -144,7 +161,6 @@ def _create_fake_modifiers(name, prereqs, opt_prereqs): import numpy as np from xarray import DataArray from satpy.composites import CompositeBase, IncompatibleAreas - from satpy import DatasetID attrs = { 'name': name, @@ -165,7 +181,7 @@ def 
__call__(self, datasets, optional_datasets, **info): continue assert optional_datasets is not None and \ len(optional_datasets) - resolution = DatasetID.from_dict(datasets[0].attrs).resolution + resolution = datasets[0].attrs.get('resolution') if name == 'res_change' and resolution is not None: i = datasets[0].attrs.copy() i['resolution'] *= 5 @@ -189,48 +205,48 @@ def __call__(self, datasets, optional_datasets, **info): def test_composites(sensor_name): """Create some test composites.""" - from satpy import DatasetID, DatasetDict + from satpy import DatasetDict # Composite ID -> (prereqs, optional_prereqs) comps = { - DatasetID(name='comp1'): (['ds1'], []), - DatasetID(name='comp2'): (['ds1', 'ds2'], []), - DatasetID(name='comp3'): (['ds1', 'ds2', 'ds3'], []), - DatasetID(name='comp4'): (['comp2', 'ds3'], []), - DatasetID(name='comp5'): (['ds1', 'ds2'], ['ds3']), - DatasetID(name='comp6'): (['ds1', 'ds2'], ['comp2']), - DatasetID(name='comp7'): (['ds1', 'comp2'], ['ds2']), - DatasetID(name='comp8'): (['ds_NOPE', 'comp2'], []), - DatasetID(name='comp9'): (['ds1', 'comp2'], ['ds_NOPE']), - DatasetID(name='comp10'): ([DatasetID('ds1', modifiers=('mod1',)), 'comp2'], []), - DatasetID(name='comp11'): ([0.22, 0.48, 0.85], []), - DatasetID(name='comp12'): ([DatasetID(wavelength=0.22, modifiers=('mod1',)), - DatasetID(wavelength=0.48, modifiers=('mod1',)), - DatasetID(wavelength=0.85, modifiers=('mod1',))], []), - DatasetID(name='comp13'): ([DatasetID(name='ds5', modifiers=('res_change',))], []), - DatasetID(name='comp14'): (['ds1'], []), - DatasetID(name='comp15'): (['ds1', 'ds9_fail_load'], []), - DatasetID(name='comp16'): (['ds1'], ['ds9_fail_load']), - DatasetID(name='comp17'): (['ds1', 'comp15'], []), - DatasetID(name='comp18'): (['ds3', - DatasetID(name='ds4', modifiers=('mod1', 'mod3',)), - DatasetID(name='ds5', modifiers=('mod1', 'incomp_areas'))], []), - DatasetID(name='comp18_2'): (['ds3', - DatasetID(name='ds4', modifiers=('mod1', 'mod3',)), - DatasetID(name='ds5', modifiers=('mod1', 'incomp_areas_opt'))], []), - DatasetID(name='comp19'): ([DatasetID('ds5', modifiers=('res_change',)), 'comp13', 'ds2'], []), - DatasetID(name='comp20'): ([DatasetID(name='ds5', modifiers=('mod_opt_prereq',))], []), - DatasetID(name='comp21'): ([DatasetID(name='ds5', modifiers=('mod_bad_opt',))], []), - DatasetID(name='comp22'): ([DatasetID(name='ds5', modifiers=('mod_opt_only',))], []), - DatasetID(name='comp23'): ([0.8], []), - DatasetID(name='static_image'): ([], []), - DatasetID(name='comp24', resolution=500): ([DatasetID(name='ds11', resolution=500), - DatasetID(name='ds12', resolution=500)], []), - DatasetID(name='comp24', resolution=1000): ([DatasetID(name='ds11', resolution=1000), - DatasetID(name='ds12', resolution=1000)], []), - DatasetID(name='comp25', resolution=500): ([DatasetID(name='comp24', resolution=500), - DatasetID(name='ds5', resolution=500)], []), - DatasetID(name='comp25', resolution=1000): ([DatasetID(name='comp24', resolution=1000), - DatasetID(name='ds5', resolution=1000)], []), + make_cid(name='comp1'): (['ds1'], []), + make_cid(name='comp2'): (['ds1', 'ds2'], []), + make_cid(name='comp3'): (['ds1', 'ds2', 'ds3'], []), + make_cid(name='comp4'): (['comp2', 'ds3'], []), + make_cid(name='comp5'): (['ds1', 'ds2'], ['ds3']), + make_cid(name='comp6'): (['ds1', 'ds2'], ['comp2']), + make_cid(name='comp7'): (['ds1', 'comp2'], ['ds2']), + make_cid(name='comp8'): (['ds_NOPE', 'comp2'], []), + make_cid(name='comp9'): (['ds1', 'comp2'], ['ds_NOPE']), + make_cid(name='comp10'): 
([make_dsq(name='ds1', modifiers=('mod1',)), 'comp2'], []), + make_cid(name='comp11'): ([0.22, 0.48, 0.85], []), + make_cid(name='comp12'): ([make_dsq(wavelength=0.22, modifiers=('mod1',)), + make_dsq(wavelength=0.48, modifiers=('mod1',)), + make_dsq(wavelength=0.85, modifiers=('mod1',))], []), + make_cid(name='comp13'): ([make_dsq(name='ds5', modifiers=('res_change',))], []), + make_cid(name='comp14'): (['ds1'], []), + make_cid(name='comp15'): (['ds1', 'ds9_fail_load'], []), + make_cid(name='comp16'): (['ds1'], ['ds9_fail_load']), + make_cid(name='comp17'): (['ds1', 'comp15'], []), + make_cid(name='comp18'): (['ds3', + make_dsq(name='ds4', modifiers=('mod1', 'mod3',)), + make_dsq(name='ds5', modifiers=('mod1', 'incomp_areas'))], []), + make_cid(name='comp18_2'): (['ds3', + make_dsq(name='ds4', modifiers=('mod1', 'mod3',)), + make_dsq(name='ds5', modifiers=('mod1', 'incomp_areas_opt'))], []), + make_cid(name='comp19'): ([make_dsq(name='ds5', modifiers=('res_change',)), 'comp13', 'ds2'], []), + make_cid(name='comp20'): ([make_dsq(name='ds5', modifiers=('mod_opt_prereq',))], []), + make_cid(name='comp21'): ([make_dsq(name='ds5', modifiers=('mod_bad_opt',))], []), + make_cid(name='comp22'): ([make_dsq(name='ds5', modifiers=('mod_opt_only',))], []), + make_cid(name='comp23'): ([0.8], []), + make_cid(name='static_image'): ([], []), + make_cid(name='comp24', resolution=500): ([make_dsq(name='ds11', resolution=500), + make_dsq(name='ds12', resolution=500)], []), + make_cid(name='comp24', resolution=1000): ([make_dsq(name='ds11', resolution=1000), + make_dsq(name='ds12', resolution=1000)], []), + make_cid(name='comp25', resolution=500): ([make_dsq(name='comp24', resolution=500), + make_dsq(name='ds5', resolution=500)], []), + make_cid(name='comp25', resolution=1000): ([make_dsq(name='comp24', resolution=1000), + make_dsq(name='ds5', resolution=1000)], []), } # Modifier name -> (prereqs (not including to-be-modified), opt_prereqs) mods = { @@ -239,13 +255,12 @@ def test_composites(sensor_name): 'mod3': (['ds2'], []), 'res_change': ([], []), 'incomp_areas': (['ds1'], []), - 'incomp_areas_opt': ([DatasetID(name='ds1', modifiers=('incomp_areas',))], ['ds2']), + 'incomp_areas_opt': ([make_dataid(name='ds1', modifiers=('incomp_areas',))], ['ds2']), 'mod_opt_prereq': (['ds1'], ['ds2']), 'mod_bad_opt': (['ds1'], ['ds9_fail_load']), 'mod_opt_only': ([], ['ds2']), - 'mod_wl': ([DatasetID(wavelength=0.2, modifiers=('mod1',))], []), + 'mod_wl': ([make_dsq(wavelength=0.2, modifiers=('mod1',))], []), } - comps = {sensor_name: DatasetDict((k, _create_fake_compositor(k, *v)) for k, v in comps.items())} mods = {sensor_name: dict((k, _create_fake_modifiers(k, *v)) for k, v in mods.items())} @@ -253,13 +268,13 @@ def test_composites(sensor_name): def _filter_datasets(all_ds, names_or_ids): - """Help filtering DatasetIDs by name or DatasetID.""" - # DatasetID will match a str to the name + """Help filtering DataIDs by name or DataQuery.""" + # DataID will match a str to the name # need to separate them out str_filter = [ds_name for ds_name in names_or_ids if isinstance(ds_name, str)] id_filter = [ds_id for ds_id in names_or_ids if not isinstance(ds_id, str)] for ds_id in all_ds: - if ds_id in id_filter or ds_id.name in str_filter: + if ds_id in id_filter or ds_id['name'] in str_filter: yield ds_id @@ -329,7 +344,7 @@ def load(self, dataset_keys): dataset_ids = self.all_ids.keys() loaded_datasets = DatasetDict() for k in dataset_keys: - if k == 'ds9_fail_load': + if k['name'] == 'ds9_fail_load': continue for ds in 
dataset_ids: if ds == k: diff --git a/satpy/tests/writer_tests/test_cf.py b/satpy/tests/writer_tests/test_cf.py index d6b3147d23..0976ab16e4 100644 --- a/satpy/tests/writer_tests/test_cf.py +++ b/satpy/tests/writer_tests/test_cf.py @@ -23,7 +23,7 @@ from unittest import mock from datetime import datetime import tempfile -from satpy import DatasetID +from satpy.tests.utils import make_dsq import numpy as np @@ -71,14 +71,12 @@ def test_save_array(self): scn['test-array'] = xr.DataArray([1, 2, 3], attrs=dict(start_time=start_time, end_time=end_time, - prerequisites=[DatasetID('hej')])) + prerequisites=[make_dsq(name='hej')])) with TempFile() as filename: scn.save_datasets(filename=filename, writer='cf') with xr.open_dataset(filename) as f: self.assertTrue(np.all(f['test-array'][:] == [1, 2, 3])) - expected_prereq = ("DatasetID(name='hej', wavelength=None, " - "resolution=None, polarization=None, " - "calibration=None, level=None, modifiers=())") + expected_prereq = ("DataQuery(name='hej')") self.assertEqual(f['test-array'].attrs['prerequisites'], expected_prereq) @@ -94,7 +92,7 @@ def test_save_with_compression(self): scn['test-array'] = xr.DataArray([1, 2, 3], attrs=dict(start_time=start_time, end_time=end_time, - prerequisites=[DatasetID('hej')])) + prerequisites=[make_dsq(name='hej')])) comp = {'zlib': True, 'complevel': 9} scn.save_datasets(filename='bla', writer='cf', compression=comp) @@ -123,7 +121,7 @@ def test_save_array_coords(self): coords=coords, attrs=dict(start_time=start_time, end_time=end_time, - prerequisites=[DatasetID('hej')])) + prerequisites=[make_dsq(name='hej')])) with TempFile() as filename: scn.save_datasets(filename=filename, writer='cf') with xr.open_dataset(filename) as f: @@ -133,9 +131,7 @@ def test_save_array_coords(self): self.assertNotIn('crs', f) self.assertNotIn('_FillValue', f['x'].attrs) self.assertNotIn('_FillValue', f['y'].attrs) - expected_prereq = ("DatasetID(name='hej', wavelength=None, " - "resolution=None, polarization=None, " - "calibration=None, level=None, modifiers=())") + expected_prereq = ("DataQuery(name='hej')") self.assertEqual(f['test-array'].attrs['prerequisites'], expected_prereq) @@ -502,11 +498,10 @@ def test_da2cf(self): # Create set of test attributes attrs, attrs_expected, attrs_expected_flat = self.get_test_attrs() attrs['area'] = 'some_area' - attrs['prerequisites'] = [DatasetID('hej')] + attrs['prerequisites'] = [make_dsq(name='hej')] # Adjust expected attributes - expected_prereq = ("DatasetID(name='hej', wavelength=None, resolution=None, polarization=None, " - "calibration=None, level=None, modifiers=())") + expected_prereq = ("DataQuery(name='hej')") update = {'prerequisites': [expected_prereq], 'long_name': attrs['name']} attrs_expected.update(update) diff --git a/satpy/tests/writer_tests/test_mitiff.py b/satpy/tests/writer_tests/test_mitiff.py index 17c5c8678d..3ddf270a3c 100644 --- a/satpy/tests/writer_tests/test_mitiff.py +++ b/satpy/tests/writer_tests/test_mitiff.py @@ -27,12 +27,12 @@ class TestMITIFFWriter(unittest.TestCase): """Test the MITIFF Writer class.""" def setUp(self): - """Create temporary directory to save files to""" + """Create temporary directory to save files to.""" import tempfile self.base_dir = tempfile.mkdtemp() def tearDown(self): - """Remove the temporary directory created for a test""" + """Remove the temporary directory created for a test.""" try: import shutil shutil.rmtree(self.base_dir, ignore_errors=True) @@ -40,7 +40,7 @@ def tearDown(self): pass def _get_test_datasets(self): - """Helper 
function to create a datasets list.""" + """Create a datasets list.""" import xarray as xr import dask.array as da from datetime import datetime @@ -106,7 +106,7 @@ def _get_test_datasets(self): return [ds1, ds2] def _get_test_datasets_sensor_set(self): - """Helper function to create a datasets list.""" + """Create a datasets list.""" import xarray as xr import dask.array as da from datetime import datetime @@ -172,7 +172,7 @@ def _get_test_datasets_sensor_set(self): return [ds1, ds2] def _get_test_dataset(self, bands=3): - """Helper function to create a single test dataset.""" + """Create a single test dataset.""" import xarray as xr import dask.array as da from datetime import datetime @@ -202,7 +202,7 @@ def _get_test_dataset(self, bands=3): return ds1 def _get_test_one_dataset(self): - """Helper function to create a single test dataset.""" + """Create a single test dataset.""" import xarray as xr import dask.array as da from datetime import datetime @@ -232,7 +232,7 @@ def _get_test_one_dataset(self): return ds1 def _get_test_one_dataset_sensor_set(self): - """Helper function to create a single test dataset.""" + """Create a single test dataset.""" import xarray as xr import dask.array as da from datetime import datetime @@ -262,7 +262,7 @@ def _get_test_one_dataset_sensor_set(self): return ds1 def _get_test_dataset_with_bad_values(self, bands=3): - """Helper function to create a single test dataset.""" + """Create a single test dataset.""" import xarray as xr import numpy as np from datetime import datetime @@ -296,13 +296,13 @@ def _get_test_dataset_with_bad_values(self, bands=3): return ds1 def _get_test_dataset_calibration(self, bands=6): - """Helper function to create a single test dataset.""" + """Create a single test dataset.""" import xarray as xr import dask.array as da from datetime import datetime from pyresample.geometry import AreaDefinition from pyresample.utils import proj4_str_to_dict - from satpy import DatasetID + from satpy.tests.utils import make_dsq from satpy.scene import Scene area_def = AreaDefinition( 'test', @@ -315,13 +315,13 @@ def _get_test_dataset_calibration(self, bands=6): (-1000., -1500., 1000., 1500.), ) - d = [ - DatasetID(name='1', calibration='reflectance'), - DatasetID(name='2', calibration='reflectance'), - DatasetID(name='3', calibration='brightness_temperature'), - DatasetID(name='4', calibration='brightness_temperature'), - DatasetID(name='5', calibration='brightness_temperature'), - DatasetID(name='6', calibration='reflectance') + prereqs = [ + make_dsq(name='1', calibration='reflectance'), + make_dsq(name='2', calibration='reflectance'), + make_dsq(name='3', calibration='brightness_temperature'), + make_dsq(name='4', calibration='brightness_temperature'), + make_dsq(name='5', calibration='brightness_temperature'), + make_dsq(name='6', calibration='reflectance') ] scene = Scene() scene["1"] = xr.DataArray(da.zeros((100, 200), chunks=50), @@ -355,7 +355,7 @@ def _get_test_dataset_calibration(self, bands=6): 'platform_name': "TEST_PLATFORM_NAME", 'sensor': 'test-sensor', 'area': area_def, - 'prerequisites': d, + 'prerequisites': prereqs, 'metadata_requirements': { 'order': ['1', '2', '3', '4', '5', '6'], 'config': { @@ -399,13 +399,13 @@ def _get_test_dataset_calibration(self, bands=6): return ds1 def _get_test_dataset_calibration_one_dataset(self, bands=1): - """Helper function to create a single test dataset.""" + """Create a single test dataset.""" import xarray as xr import dask.array as da from datetime import datetime from 
pyresample.geometry import AreaDefinition from pyresample.utils import proj4_str_to_dict - from satpy import DatasetID + from satpy.tests.utils import make_dsq from satpy.scene import Scene area_def = AreaDefinition( 'test', @@ -418,7 +418,7 @@ def _get_test_dataset_calibration_one_dataset(self, bands=1): (-1000., -1500., 1000., 1500.), ) - d = [DatasetID(name='4', calibration='brightness_temperature')] + prereqs = [make_dsq(name='4', calibration='brightness_temperature')] scene = Scene() scene["4"] = xr.DataArray(da.zeros((100, 200), chunks=50), dims=('y', 'x'), @@ -433,7 +433,7 @@ def _get_test_dataset_calibration_one_dataset(self, bands=1): 'platform_name': "TEST_PLATFORM_NAME", 'sensor': 'test-sensor', 'area': area_def, - 'prerequisites': d, + 'prerequisites': prereqs, 'metadata_requirements': { 'order': ['4'], 'config': { @@ -452,13 +452,13 @@ def _get_test_dataset_calibration_one_dataset(self, bands=1): return ds1 def _get_test_dataset_three_bands_two_prereq(self, bands=3): - """Helper function to create a single test dataset.""" + """Create a single test dataset.""" import xarray as xr import dask.array as da from datetime import datetime from pyresample.geometry import AreaDefinition from pyresample.utils import proj4_str_to_dict - from satpy import DatasetID + from satpy.tests.utils import make_dsq area_def = AreaDefinition( 'test', 'test', @@ -479,8 +479,8 @@ def _get_test_dataset_three_bands_two_prereq(self, bands=3): 'platform_name': "TEST_PLATFORM_NAME", 'sensor': 'TEST_SENSOR_NAME', 'area': area_def, - 'prerequisites': [DatasetID(name='1', calibration='reflectance'), - DatasetID(name='2', calibration='reflectance')]} + 'prerequisites': [make_dsq(name='1', calibration='reflectance'), + make_dsq(name='2', calibration='reflectance')]} ) return ds1 @@ -803,6 +803,7 @@ def test_save_dataset_with_bad_value(self): np.testing.assert_allclose(image, expected, atol=1.e-6, rtol=0) def test_convert_proj4_string(self): + """Test conversion of geolocations.""" import xarray as xr import dask.array as da from satpy.writers.mitiff import MITIFFWriter @@ -936,7 +937,7 @@ def test_save_dataset_palette(self): np.testing.assert_allclose(image, expected, atol=1.e-6, rtol=0) def test_simple_write_two_bands(self): - """Test basic writer operation with 3 bands from 2 prerequisites""" + """Test basic writer operation with 3 bands from 2 prerequisites.""" from satpy.writers.mitiff import MITIFFWriter dataset = self._get_test_dataset_three_bands_two_prereq() w = MITIFFWriter(base_dir=self.base_dir) diff --git a/satpy/utils.py b/satpy/utils.py index 461054e0aa..58ef47932b 100644 --- a/satpy/utils.py +++ b/satpy/utils.py @@ -54,6 +54,8 @@ def read(self, filename): # Pass if file not found if e.errno != 2: raise + finally: + conf_file.close() return self.config_parser.read(filename) diff --git a/satpy/writers/mitiff.py b/satpy/writers/mitiff.py index 0f97b258bd..78f1607e87 100644 --- a/satpy/writers/mitiff.py +++ b/satpy/writers/mitiff.py @@ -23,7 +23,7 @@ from satpy.writers import ImageWriter from satpy.writers import get_enhanced_image -from satpy.dataset import DatasetID +from satpy.dataset import DataQuery, DataID import dask @@ -172,10 +172,16 @@ def _make_channel_list(self, datasets, **kwargs): if self.channel_order: for cn in self.channel_order[kwargs['sensor']]: for ch, ds in enumerate(datasets): - if ds.attrs['prerequisites'][ch][0] == cn: - channels.append( - ds.attrs['prerequisites'][ch][0]) - break + if isinstance(ds.attrs['prerequisites'][ch], (DataQuery, DataID)): + if 
ds.attrs['prerequisites'][ch]['name'] == cn: + channels.append( + ds.attrs['prerequisites'][ch]['name']) + break + else: + if ds.attrs['prerequisites'][ch] == cn: + channels.append( + ds.attrs['prerequisites'][ch]) + break elif self.palette: if 'palette_channel_name' in kwargs: channels.append(kwargs['palette_channel_name'].upper()) @@ -360,15 +366,15 @@ def _add_calibration_datasets(self, ch, datasets, reverse_offset, reverse_scale, if ('prerequisites' in ds.attrs and isinstance(ds.attrs['prerequisites'], list) and len(ds.attrs['prerequisites']) >= i + 1 and - isinstance(ds.attrs['prerequisites'][i], DatasetID)): - if ds.attrs['prerequisites'][i][0] == ch: - if ds.attrs['prerequisites'][i][4] == 'RADIANCE': + isinstance(ds.attrs['prerequisites'][i], (DataQuery, DataID))): + if ds.attrs['prerequisites'][i]['name'] == str(ch): + if ds.attrs['prerequisites'][i].get('calibration') == 'RADIANCE': raise NotImplementedError( "Mitiff radiance calibration not implemented.") # _table_calibration += ', Radiance, ' # _table_calibration += '[W/m²/µm/sr]' # _decimals = 8 - elif ds.attrs['prerequisites'][i][4] == 'brightness_temperature': + elif ds.attrs['prerequisites'][i].get('calibration') == 'brightness_temperature': found_calibration = True _table_calibration += ', BT, ' _table_calibration += u'\u00B0' # '\u2103' @@ -377,7 +383,7 @@ def _add_calibration_datasets(self, ch, datasets, reverse_offset, reverse_scale, _reverse_offset = 255. _reverse_scale = -1. _decimals = 2 - elif ds.attrs['prerequisites'][i][4] == 'reflectance': + elif ds.attrs['prerequisites'][i].get('calibration') == 'reflectance': found_calibration = True _table_calibration += ', Reflectance(Albedo), ' _table_calibration += '[%]' @@ -692,9 +698,9 @@ def _save_datasets_as_mitiff(self, datasets, image_description, # Need to possible translate channels names from satpy to mitiff # Note the last index is a tuple index. - cn = cns.get(datasets.attrs['prerequisites'][0][0], - datasets.attrs['prerequisites'][0][0]) - data = self._calibrate_data(datasets, datasets.attrs['prerequisites'][0][4], + cn = cns.get(datasets.attrs['prerequisites'][0]['name'], + datasets.attrs['prerequisites'][0]['name']) + data = self._calibrate_data(datasets, datasets.attrs['prerequisites'][0].get('calibration'), self.mitiff_config[kwargs['sensor']][cn]['min-val'], self.mitiff_config[kwargs['sensor']][cn]['max-val']) @@ -706,9 +712,9 @@ def _save_datasets_as_mitiff(self, datasets, image_description, chn = datasets.sel(bands=band) # Need to possible translate channels names from satpy to mitiff # Note the last index is a tuple index. 
- cn = cns.get(chn.attrs['prerequisites'][_cn_i][0], - chn.attrs['prerequisites'][_cn_i][0]) - data = self._calibrate_data(chn, chn.attrs['prerequisites'][_cn_i][4], + cn = cns.get(chn.attrs['prerequisites'][_cn_i]['name'], + chn.attrs['prerequisites'][_cn_i]['name']) + data = self._calibrate_data(chn, chn.attrs['prerequisites'][_cn_i].get('calibration'), self.mitiff_config[kwargs['sensor']][cn]['min-val'], self.mitiff_config[kwargs['sensor']][cn]['max-val']) diff --git a/setup.py b/setup.py index 34c10a5a51..92a167fa2b 100644 --- a/setup.py +++ b/setup.py @@ -103,7 +103,8 @@ def _config_data_files(base_dirs, extensions=(".cfg", )): NAME = 'satpy' -README = open('README.rst', 'r').read() +with open('README.rst', 'r') as readme: + README = readme.read() setup(name=NAME, description='Python package for earth-observing satellite data processing', diff --git a/utils/fetch_avhrr_calcoeffs.py b/utils/fetch_avhrr_calcoeffs.py index b93bf16db0..7913a8e62b 100644 --- a/utils/fetch_avhrr_calcoeffs.py +++ b/utils/fetch_avhrr_calcoeffs.py @@ -15,7 +15,7 @@ # # You should have received a copy of the GNU General Public License along with # satpy. If not, see . - +"""Fetch avhrr calibration coefficients.""" import urllib2 import h5py import datetime as dt @@ -49,12 +49,14 @@ "ch2": BASE_URL + "N19_AVHRR_Libya_ch2.txt"} } + def get_page(url): - '''Retrieve the given page.''' + """Retrieve the given page.""" return urllib2.urlopen(url).read() + def get_coeffs(page): - '''Parse coefficients from the page.''' + """Parse coefficients from the page.""" coeffs = {} coeffs['datetime'] = [] coeffs['slope1'] = [] @@ -99,8 +101,9 @@ def get_coeffs(page): return coeffs + def get_all_coeffs(): - '''Get all available calibration coefficients for the satellites.''' + """Get all available calibration coefficients for the satellites.""" coeffs = {} for platform in URLS.keys(): @@ -108,18 +111,19 @@ def get_all_coeffs(): coeffs[platform] = {} for chan in URLS[platform].keys(): url = URLS[platform][chan] - print url + print(url) page = get_page(url) coeffs[platform][chan] = get_coeffs(page) return coeffs + def save_coeffs(coeffs, out_dir=''): - '''Save calibration coefficients to HDF5 files.''' + """Save calibration coefficients to HDF5 files.""" for platform in coeffs.keys(): fname = os.path.join(out_dir, "%s_calibration_data.h5" % platform) fid = h5py.File(fname, 'w') - + for chan in coeffs[platform].keys(): fid.create_group(chan) fid[chan]['datetime'] = coeffs[platform][chan]['datetime'] @@ -129,13 +133,15 @@ def save_coeffs(coeffs, out_dir=''): fid[chan]['intercept2'] = coeffs[platform][chan]['intercept2'] fid.close() - print "Calibration coefficients saved for %s" % platform + print("Calibration coefficients saved for %s" % platform) + def main(): - '''Create calibration coefficient files for AVHRR''' + """Create calibration coefficient files for AVHRR.""" out_dir = sys.argv[1] coeffs = get_all_coeffs() save_coeffs(coeffs, out_dir=out_dir) + if __name__ == "__main__": main()
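For reference, here is a minimal sketch of the ID handling that the hunks above migrate to. It assumes a Satpy version that ships the ``DataID``/``DataQuery`` classes and the ``default_id_keys_config`` introduced in this changeset; the ``make_dataid``/``make_dsq`` helpers added in ``satpy/tests/utils.py`` are thin wrappers around exactly these calls::

    # Sketch only: assumes satpy.dataset exposes DataID, DataQuery and
    # default_id_keys_config as added by this changeset.
    from satpy.dataset import DataID, DataQuery, default_id_keys_config

    # make_dataid(**items) in the test utilities boils down to this call:
    ds_id = DataID(default_id_keys_config, name='ds5', resolution=500,
                   calibration='reflectance', modifiers=())

    # DataID is mapping-like, so the old namedtuple attribute access
    # (ds_id.name, ds_id.calibration) becomes dict-style access:
    assert ds_id['name'] == 'ds5'
    assert ds_id.get('calibration') == 'reflectance'

    # DataQuery (make_dsq in the tests) describes what to load; optional keys
    # are read back with .get(), as the mitiff writer now does for its
    # prerequisites instead of indexing into the old namedtuple:
    prereq = DataQuery(name='4', calibration='brightness_temperature')
    channel_name = prereq['name']            # replaces prereq[0]
    calibration = prereq.get('calibration')  # replaces prereq[4]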