From ce153852771fe6b0a45534df20b061a6f559842e Mon Sep 17 00:00:00 2001 From: keewis Date: Thu, 27 Aug 2020 16:56:37 +0200 Subject: [PATCH] run black and blackdoc (#4381) --- xarray/backends/file_manager.py | 3 +- xarray/backends/h5netcdf_.py | 3 +- xarray/backends/lru_cache.py | 3 +- xarray/backends/pseudonetcdf_.py | 3 +- xarray/backends/pynio_.py | 3 +- xarray/backends/rasterio_.py | 2 +- xarray/backends/zarr.py | 3 +- xarray/coding/cftime_offsets.py | 6 +-- xarray/coding/strings.py | 6 +-- xarray/coding/variables.py | 6 +-- xarray/convert.py | 29 ++++------- xarray/core/accessor_dt.py | 3 +- xarray/core/common.py | 51 +++++++++----------- xarray/core/computation.py | 3 +- xarray/core/coordinates.py | 3 +- xarray/core/dask_array_compat.py | 4 +- xarray/core/dask_array_ops.py | 6 +-- xarray/core/dataarray.py | 50 ++++++++----------- xarray/core/dataset.py | 57 +++++++++------------- xarray/core/dtypes.py | 3 +- xarray/core/duck_array_ops.py | 29 +++++------ xarray/core/formatting_html.py | 3 +- xarray/core/groupby.py | 12 ++--- xarray/core/indexes.py | 3 +- xarray/core/indexing.py | 18 +++---- xarray/core/merge.py | 13 +++-- xarray/core/missing.py | 15 +++--- xarray/core/nanops.py | 2 +- xarray/core/nputils.py | 3 +- xarray/core/parallel.py | 9 ++-- xarray/core/resample.py | 3 +- xarray/core/resample_cftime.py | 4 +- xarray/core/utils.py | 21 +++----- xarray/core/variable.py | 27 +++++------ xarray/core/weighted.py | 2 +- xarray/plot/facetgrid.py | 3 +- xarray/plot/utils.py | 4 +- xarray/testing.py | 4 +- xarray/tests/test_computation.py | 22 ++++++--- xarray/tests/test_dask.py | 4 +- xarray/tests/test_duck_array_ops.py | 2 +- xarray/tests/test_interp.py | 6 ++- xarray/tests/test_plot.py | 8 +-- xarray/tests/test_units.py | 75 +++++++++++++++++++++-------- xarray/util/print_versions.py | 2 +- 45 files changed, 255 insertions(+), 286 deletions(-) diff --git a/xarray/backends/file_manager.py b/xarray/backends/file_manager.py index 549426b5d07..4b9c95ec792 100644 --- a/xarray/backends/file_manager.py +++ b/xarray/backends/file_manager.py @@ -314,8 +314,7 @@ def __hash__(self): class DummyFileManager(FileManager): - """FileManager that simply wraps an open file in the FileManager interface. - """ + """FileManager that simply wraps an open file in the FileManager interface.""" def __init__(self, value): self._value = value diff --git a/xarray/backends/h5netcdf_.py b/xarray/backends/h5netcdf_.py index 393db14a7e9..f3e61eeee74 100644 --- a/xarray/backends/h5netcdf_.py +++ b/xarray/backends/h5netcdf_.py @@ -67,8 +67,7 @@ def _h5netcdf_create_group(dataset, name): class H5NetCDFStore(WritableCFDataStore): - """Store for reading and writing data via h5netcdf - """ + """Store for reading and writing data via h5netcdf""" __slots__ = ( "autoclose", diff --git a/xarray/backends/lru_cache.py b/xarray/backends/lru_cache.py index 56062256001..5ca49a0311a 100644 --- a/xarray/backends/lru_cache.py +++ b/xarray/backends/lru_cache.py @@ -55,8 +55,7 @@ def __getitem__(self, key: K) -> V: return value def _enforce_size_limit(self, capacity: int) -> None: - """Shrink the cache if necessary, evicting the oldest items. 
- """ + """Shrink the cache if necessary, evicting the oldest items.""" while len(self._cache) > capacity: key, value = self._cache.popitem(last=False) if self._on_evict is not None: diff --git a/xarray/backends/pseudonetcdf_.py b/xarray/backends/pseudonetcdf_.py index 17a4eb8f6bf..03f7330f351 100644 --- a/xarray/backends/pseudonetcdf_.py +++ b/xarray/backends/pseudonetcdf_.py @@ -35,8 +35,7 @@ def _getitem(self, key): class PseudoNetCDFDataStore(AbstractDataStore): - """Store for accessing datasets via PseudoNetCDF - """ + """Store for accessing datasets via PseudoNetCDF""" @classmethod def open(cls, filename, lock=None, mode=None, **format_kwargs): diff --git a/xarray/backends/pynio_.py b/xarray/backends/pynio_.py index 1c66ff1ee48..bca70973c0b 100644 --- a/xarray/backends/pynio_.py +++ b/xarray/backends/pynio_.py @@ -41,8 +41,7 @@ def _getitem(self, key): class NioDataStore(AbstractDataStore): - """Store for accessing datasets via PyNIO - """ + """Store for accessing datasets via PyNIO""" def __init__(self, filename, mode="r", lock=None, **kwargs): import Nio diff --git a/xarray/backends/rasterio_.py b/xarray/backends/rasterio_.py index 661d5b5c6fc..a0500c7e1c2 100644 --- a/xarray/backends/rasterio_.py +++ b/xarray/backends/rasterio_.py @@ -50,7 +50,7 @@ def shape(self): return self._shape def _get_indexer(self, key): - """ Get indexer for rasterio array. + """Get indexer for rasterio array. Parameter --------- diff --git a/xarray/backends/zarr.py b/xarray/backends/zarr.py index e1d46e9c347..89ba97b3fa5 100644 --- a/xarray/backends/zarr.py +++ b/xarray/backends/zarr.py @@ -257,8 +257,7 @@ def encode_zarr_variable(var, needs_copy=True, name=None): class ZarrStore(AbstractWritableDataStore): - """Store for reading and writing data via zarr - """ + """Store for reading and writing data via zarr""" __slots__ = ( "append_dim", diff --git a/xarray/coding/cftime_offsets.py b/xarray/coding/cftime_offsets.py index 4e77530dfdb..5ca4f5f6df3 100644 --- a/xarray/coding/cftime_offsets.py +++ b/xarray/coding/cftime_offsets.py @@ -221,8 +221,7 @@ def _adjust_n_years(other, n, month, reference_day): def _shift_month(date, months, day_option="start"): - """Shift the date to a month start or end a given number of months away. - """ + """Shift the date to a month start or end a given number of months away.""" import cftime delta_year = (date.month + months) // 12 @@ -354,8 +353,7 @@ def onOffset(self, date): class QuarterOffset(BaseCFTimeOffset): - """Quarter representation copied off of pandas/tseries/offsets.py - """ + """Quarter representation copied off of pandas/tseries/offsets.py""" _freq: ClassVar[str] _default_month: ClassVar[int] diff --git a/xarray/coding/strings.py b/xarray/coding/strings.py index 35cc190ffe3..8d7f777d1d5 100644 --- a/xarray/coding/strings.py +++ b/xarray/coding/strings.py @@ -145,8 +145,7 @@ def bytes_to_char(arr): def _numpy_bytes_to_char(arr): - """Like netCDF4.stringtochar, but faster and more flexible. - """ + """Like netCDF4.stringtochar, but faster and more flexible.""" # ensure the array is contiguous arr = np.array(arr, copy=False, order="C", dtype=np.string_) return arr.reshape(arr.shape + (1,)).view("S1") @@ -189,8 +188,7 @@ def char_to_bytes(arr): def _numpy_char_to_bytes(arr): - """Like netCDF4.chartostring, but faster and more flexible. 
- """ + """Like netCDF4.chartostring, but faster and more flexible.""" # based on: http://stackoverflow.com/a/10984878/809705 arr = np.array(arr, copy=False, order="C") dtype = "S" + str(arr.shape[-1]) diff --git a/xarray/coding/variables.py b/xarray/coding/variables.py index 28ead397461..afb50fa517a 100644 --- a/xarray/coding/variables.py +++ b/xarray/coding/variables.py @@ -35,15 +35,13 @@ class VariableCoder: def encode( self, variable: Variable, name: Hashable = None ) -> Variable: # pragma: no cover - """Convert an encoded variable to a decoded variable - """ + """Convert an encoded variable to a decoded variable""" raise NotImplementedError() def decode( self, variable: Variable, name: Hashable = None ) -> Variable: # pragma: no cover - """Convert an decoded variable to a encoded variable - """ + """Convert an decoded variable to a encoded variable""" raise NotImplementedError() diff --git a/xarray/convert.py b/xarray/convert.py index 395581bace7..43e9ce94fb7 100644 --- a/xarray/convert.py +++ b/xarray/convert.py @@ -55,14 +55,12 @@ def encode(var): def _filter_attrs(attrs, ignored_attrs): - """ Return attrs that are not in ignored_attrs - """ + """Return attrs that are not in ignored_attrs""" return {k: v for k, v in attrs.items() if k not in ignored_attrs} def from_cdms2(variable): - """Convert a cdms2 variable into an DataArray - """ + """Convert a cdms2 variable into an DataArray""" values = np.asarray(variable) name = variable.id dims = variable.getAxisIds() @@ -89,8 +87,7 @@ def from_cdms2(variable): def to_cdms2(dataarray, copy=True): - """Convert a DataArray into a cdms2 variable - """ + """Convert a DataArray into a cdms2 variable""" # we don't want cdms2 to be a hard dependency import cdms2 @@ -151,14 +148,12 @@ def set_cdms2_attrs(var, attrs): def _pick_attrs(attrs, keys): - """ Return attrs with keys in keys list - """ + """Return attrs with keys in keys list""" return {k: v for k, v in attrs.items() if k in keys} def _get_iris_args(attrs): - """ Converts the xarray attrs into args that can be passed into Iris - """ + """Converts the xarray attrs into args that can be passed into Iris""" # iris.unit is deprecated in Iris v1.9 import cf_units @@ -172,8 +167,7 @@ def _get_iris_args(attrs): # TODO: Add converting bounds from xarray to Iris and back def to_iris(dataarray): - """ Convert a DataArray into a Iris Cube - """ + """Convert a DataArray into a Iris Cube""" # Iris not a hard dependency import iris from iris.fileformats.netcdf import parse_cell_methods @@ -213,8 +207,7 @@ def to_iris(dataarray): def _iris_obj_to_attrs(obj): - """ Return a dictionary of attrs when given a Iris object - """ + """Return a dictionary of attrs when given a Iris object""" attrs = {"standard_name": obj.standard_name, "long_name": obj.long_name} if obj.units.calendar: attrs["calendar"] = obj.units.calendar @@ -225,8 +218,7 @@ def _iris_obj_to_attrs(obj): def _iris_cell_methods_to_str(cell_methods_obj): - """ Converts a Iris cell methods into a string - """ + """Converts a Iris cell methods into a string""" cell_methods = [] for cell_method in cell_methods_obj: names = "".join(f"{n}: " for n in cell_method.coord_names) @@ -242,7 +234,7 @@ def _iris_cell_methods_to_str(cell_methods_obj): def _name(iris_obj, default="unknown"): - """ Mimicks `iris_obj.name()` but with different name resolution order. + """Mimicks `iris_obj.name()` but with different name resolution order. Similar to iris_obj.name() method, but using iris_obj.var_name first to enable roundtripping. 
@@ -251,8 +243,7 @@ def _name(iris_obj, default="unknown"): def from_iris(cube): - """ Convert a Iris cube into an DataArray - """ + """Convert a Iris cube into an DataArray""" import iris.exceptions from xarray.core.pycompat import dask_array_type diff --git a/xarray/core/accessor_dt.py b/xarray/core/accessor_dt.py index a84da37986e..214b4352c8a 100644 --- a/xarray/core/accessor_dt.py +++ b/xarray/core/accessor_dt.py @@ -10,8 +10,7 @@ def _season_from_months(months): - """Compute season (DJF, MAM, JJA, SON) from month ordinal - """ + """Compute season (DJF, MAM, JJA, SON) from month ordinal""" # TODO: Move "season" accessor upstream into pandas seasons = np.array(["DJF", "MAM", "JJA", "SON"]) months = np.asarray(months) diff --git a/xarray/core/common.py b/xarray/core/common.py index 91bfb87a839..b693ed7832f 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -111,8 +111,7 @@ def wrapped_func(self, dim=None, **kwargs): # type: ignore class AbstractArray(ImplementsArrayReduce): - """Shared base class for DataArray and Variable. - """ + """Shared base class for DataArray and Variable.""" __slots__ = () @@ -188,8 +187,7 @@ def sizes(self: Any) -> Mapping[Hashable, int]: class AttrAccessMixin: - """Mixin class that allows getting keys with attribute access - """ + """Mixin class that allows getting keys with attribute access""" __slots__ = () @@ -212,14 +210,12 @@ def __init_subclass__(cls): @property def _attr_sources(self) -> List[Mapping[Hashable, Any]]: - """List of places to look-up items for attribute-style access - """ + """List of places to look-up items for attribute-style access""" return [] @property def _item_sources(self) -> List[Mapping[Hashable, Any]]: - """List of places to look-up items for key-autocompletion - """ + """List of places to look-up items for key-autocompletion""" return [] def __getattr__(self, name: str) -> Any: @@ -239,8 +235,7 @@ def __getattr__(self, name: str) -> Any: # runtime before every single assignment. All of this is just temporary until the # FutureWarning can be changed into a hard crash. def _setattr_dict(self, name: str, value: Any) -> None: - """Deprecated third party subclass (see ``__init_subclass__`` above) - """ + """Deprecated third party subclass (see ``__init_subclass__`` above)""" object.__setattr__(self, name, value) if name in self.__dict__: # Custom, non-slotted attr, or improperly assigned variable? @@ -304,8 +299,7 @@ def get_squeeze_dims( dim: Union[Hashable, Iterable[Hashable], None] = None, axis: Union[int, Iterable[int], None] = None, ) -> List[Hashable]: - """Get a list of dimensions to squeeze out. - """ + """Get a list of dimensions to squeeze out.""" if dim is not None and axis is not None: raise ValueError("cannot use both parameters `axis` and `dim`") if dim is None and axis is None: @@ -374,8 +368,7 @@ def squeeze( return self.isel(drop=drop, **{d: 0 for d in dims}) def get_index(self, key: Hashable) -> pd.Index: - """Get an index for a dimension, with fall-back to a default RangeIndex - """ + """Get an index for a dimension, with fall-back to a default RangeIndex""" if key not in self.dims: raise KeyError(key) @@ -423,7 +416,9 @@ def assign_coords(self, coords=None, **coords_kwargs): Convert longitude coordinates from 0-359 to -180-179: >>> da = xr.DataArray( - ... np.random.rand(4), coords=[np.array([358, 359, 0, 1])], dims="lon", + ... np.random.rand(4), + ... coords=[np.array([358, 359, 0, 1])], + ... dims="lon", ... ) >>> da @@ -830,7 +825,9 @@ def rolling( ... np.linspace(0, 11, num=12), ... 
coords=[ ... pd.date_range( - ... "15/12/1999", periods=12, freq=pd.DateOffset(months=1), + ... "15/12/1999", + ... periods=12, + ... freq=pd.DateOffset(months=1), ... ) ... ], ... dims="time", @@ -1037,7 +1034,9 @@ def resample( ... np.linspace(0, 11, num=12), ... coords=[ ... pd.date_range( - ... "15/12/1999", periods=12, freq=pd.DateOffset(months=1), + ... "15/12/1999", + ... periods=12, + ... freq=pd.DateOffset(months=1), ... ) ... ], ... dims="time", @@ -1242,8 +1241,7 @@ def where(self, cond, other=dtypes.NA, drop: bool = False): return ops.where_method(self, cond, other) def close(self: Any) -> None: - """Close any files linked to this object - """ + """Close any files linked to this object""" if self._file_obj is not None: self._file_obj.close() self._file_obj = None @@ -1503,8 +1501,7 @@ def full_like(other, fill_value, dtype: DTypeLike = None): def _full_like_variable(other, fill_value, dtype: DTypeLike = None): - """Inner function of full_like, where other must be a variable - """ + """Inner function of full_like, where other must be a variable""" from .variable import Variable if fill_value is dtypes.NA: @@ -1637,20 +1634,17 @@ def ones_like(other, dtype: DTypeLike = None): def is_np_datetime_like(dtype: DTypeLike) -> bool: - """Check if a dtype is a subclass of the numpy datetime types - """ + """Check if a dtype is a subclass of the numpy datetime types""" return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64) def is_np_timedelta_like(dtype: DTypeLike) -> bool: - """Check whether dtype is of the timedelta64 dtype. - """ + """Check whether dtype is of the timedelta64 dtype.""" return np.issubdtype(dtype, np.timedelta64) def _contains_cftime_datetimes(array) -> bool: - """Check if an array contains cftime.datetime objects - """ + """Check if an array contains cftime.datetime objects""" try: from cftime import datetime as cftime_datetime except ImportError: @@ -1668,8 +1662,7 @@ def _contains_cftime_datetimes(array) -> bool: def contains_cftime_datetimes(var) -> bool: - """Check if an xarray.Variable contains cftime.datetime objects - """ + """Check if an xarray.Variable contains cftime.datetime objects""" return _contains_cftime_datetimes(var.data) diff --git a/xarray/core/computation.py b/xarray/core/computation.py index e9110cbfead..a2fec799a70 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -592,8 +592,7 @@ def apply_variable_ufunc( keep_attrs=False, dask_gufunc_kwargs=None, ): - """Apply a ndarray level function over Variable and/or ndarray objects. 
- """ + """Apply a ndarray level function over Variable and/or ndarray objects.""" from .variable import Variable, as_compatible_data dim_sizes = unified_dim_sizes( diff --git a/xarray/core/coordinates.py b/xarray/core/coordinates.py index 83c4d2a8636..a4b8ca478eb 100644 --- a/xarray/core/coordinates.py +++ b/xarray/core/coordinates.py @@ -214,8 +214,7 @@ def __getitem__(self, key: Hashable) -> "DataArray": return cast("DataArray", self._data[key]) def to_dataset(self) -> "Dataset": - """Convert these coordinates into a new Dataset - """ + """Convert these coordinates into a new Dataset""" return self._data._copy_listed(self._names) def _update_coords( diff --git a/xarray/core/dask_array_compat.py b/xarray/core/dask_array_compat.py index b32f225f6b4..4efbaad0855 100644 --- a/xarray/core/dask_array_compat.py +++ b/xarray/core/dask_array_compat.py @@ -21,7 +21,7 @@ import numbers def meta_from_array(x, ndim=None, dtype=None): - """ Normalize an array to appropriate meta object + """Normalize an array to appropriate meta object Parameters ---------- @@ -101,7 +101,7 @@ def meta_from_array(x, ndim=None, dtype=None): def _validate_pad_output_shape(input_shape, pad_width, output_shape): - """ Validates the output shape of dask.array.pad, raising a RuntimeError if they do not match. + """Validates the output shape of dask.array.pad, raising a RuntimeError if they do not match. In the current versions of dask (2.2/2.4), dask.array.pad with mode='reflect' sometimes returns an invalid shape. """ diff --git a/xarray/core/dask_array_ops.py b/xarray/core/dask_array_ops.py index 2184cbf77e8..7c390770def 100644 --- a/xarray/core/dask_array_ops.py +++ b/xarray/core/dask_array_ops.py @@ -4,8 +4,7 @@ def dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1): - """Wrapper to apply bottleneck moving window funcs on dask arrays - """ + """Wrapper to apply bottleneck moving window funcs on dask arrays""" import dask.array as da dtype, fill_value = dtypes.maybe_promote(a.dtype) @@ -28,8 +27,7 @@ def dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1): def rolling_window(a, axis, window, center, fill_value): - """Dask's equivalence to np.utils.rolling_window - """ + """Dask's equivalence to np.utils.rolling_window""" import dask.array as da if not hasattr(axis, "__len__"): diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 0eaffcee0e2..75e3d612786 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -518,8 +518,7 @@ def to_dataset( @property def name(self) -> Optional[Hashable]: - """The name of this array. 
- """ + """The name of this array.""" return self._name @name.setter @@ -556,8 +555,7 @@ def __len__(self) -> int: @property def data(self) -> Any: - """The array's data as a dask or numpy array - """ + """The array's data as a dask or numpy array""" return self.variable.data @data.setter @@ -664,14 +662,12 @@ def __delitem__(self, key: Any) -> None: @property def _attr_sources(self) -> List[Mapping[Hashable, Any]]: - """List of places to look-up items for attribute-style access - """ + """List of places to look-up items for attribute-style access""" return self._item_sources + [self.attrs] @property def _item_sources(self) -> List[Mapping[Hashable, Any]]: - """List of places to look-up items for key-completion - """ + """List of places to look-up items for key-completion""" return [ self.coords, {d: self.coords[d] for d in self.dims}, @@ -683,8 +679,7 @@ def __contains__(self, key: Any) -> bool: @property def loc(self) -> _LocIndexer: - """Attribute for location based indexing like pandas. - """ + """Attribute for location based indexing like pandas.""" return _LocIndexer(self) @property @@ -709,16 +704,14 @@ def encoding(self, value: Mapping[Hashable, Any]) -> None: @property def indexes(self) -> Indexes: - """Mapping of pandas.Index objects used for label based indexing - """ + """Mapping of pandas.Index objects used for label based indexing""" if self._indexes is None: self._indexes = default_indexes(self._coords, self.dims) return Indexes(self._indexes) @property def coords(self) -> DataArrayCoordinates: - """Dictionary-like container of coordinate arrays. - """ + """Dictionary-like container of coordinate arrays.""" return DataArrayCoordinates(self) def reset_coords( @@ -840,7 +833,7 @@ def compute(self, **kwargs) -> "DataArray": return new.load(**kwargs) def persist(self, **kwargs) -> "DataArray": - """ Trigger computation in constituent dask arrays + """Trigger computation in constituent dask arrays This keeps them as dask arrays but encourages them to keep data in memory. This is particularly useful when on a distributed machine. @@ -1414,7 +1407,7 @@ def interp( kwargs: Mapping[str, Any] = None, **coords_kwargs: Any, ) -> "DataArray": - """ Multidimensional interpolation of variables. + """Multidimensional interpolation of variables. Parameters ---------- @@ -1590,7 +1583,9 @@ def swap_dims(self, dims_dict: Mapping[Hashable, Hashable]) -> "DataArray": -------- >>> arr = xr.DataArray( - ... data=[0, 1], dims="x", coords={"x": ["a", "b"], "y": ("x", [0, 1])}, + ... data=[0, 1], + ... dims="x", + ... coords={"x": ["a", "b"], "y": ("x", [0, 1])}, ... 
) >>> arr @@ -2605,38 +2600,33 @@ def from_series(cls, series: pd.Series, sparse: bool = False) -> "DataArray": return result def to_cdms2(self) -> "cdms2_Variable": - """Convert this array into a cdms2.Variable - """ + """Convert this array into a cdms2.Variable""" from ..convert import to_cdms2 return to_cdms2(self) @classmethod def from_cdms2(cls, variable: "cdms2_Variable") -> "DataArray": - """Convert a cdms2.Variable into an xarray.DataArray - """ + """Convert a cdms2.Variable into an xarray.DataArray""" from ..convert import from_cdms2 return from_cdms2(variable) def to_iris(self) -> "iris_Cube": - """Convert this array into a iris.cube.Cube - """ + """Convert this array into a iris.cube.Cube""" from ..convert import to_iris return to_iris(self) @classmethod def from_iris(cls, cube: "iris_Cube") -> "DataArray": - """Convert a iris.cube.Cube into an xarray.DataArray - """ + """Convert a iris.cube.Cube into an xarray.DataArray""" from ..convert import from_iris return from_iris(cube) def _all_compat(self, other: "DataArray", compat_str: str) -> bool: - """Helper function for equals, broadcast_equals, and identical - """ + """Helper function for equals, broadcast_equals, and identical""" def compat(x, y): return getattr(x.variable, compat_str)(y.variable) @@ -3327,7 +3317,7 @@ def integrate( return self._from_temp_dataset(ds) def unify_chunks(self) -> "DataArray": - """ Unify chunk size along all chunked dimensions of this DataArray. + """Unify chunk size along all chunked dimensions of this DataArray. Returns ------- @@ -3434,7 +3424,9 @@ def map_blocks( to the function being applied in ``xr.map_blocks()``: >>> array.map_blocks( - ... calculate_anomaly, kwargs={"groupby_type": "time.year"}, template=array, + ... calculate_anomaly, + ... kwargs={"groupby_type": "time.year"}, + ... template=array, ... ) # doctest: +ELLIPSIS dask.array diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index e49d09b2381..dbbae01dd22 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -577,8 +577,7 @@ def variables(self) -> Mapping[Hashable, Variable]: @property def attrs(self) -> Dict[Hashable, Any]: - """Dictionary of global attributes on this dataset - """ + """Dictionary of global attributes on this dataset""" if self._attrs is None: self._attrs = {} return self._attrs @@ -589,8 +588,7 @@ def attrs(self, value: Mapping[Hashable, Any]) -> None: @property def encoding(self) -> Dict: - """Dictionary of global encoding attributes on this dataset - """ + """Dictionary of global encoding attributes on this dataset""" if self._encoding is None: self._encoding = {} return self._encoding @@ -814,8 +812,7 @@ def compute(self, **kwargs) -> "Dataset": return new.load(**kwargs) def _persist_inplace(self, **kwargs) -> "Dataset": - """Persist all Dask arrays in memory - """ + """Persist all Dask arrays in memory""" # access .data to coerce everything to numpy or dask arrays lazy_data = { k: v._data @@ -834,7 +831,7 @@ def _persist_inplace(self, **kwargs) -> "Dataset": return self def persist(self, **kwargs) -> "Dataset": - """ Trigger computation, keeping data as dask arrays + """Trigger computation, keeping data as dask arrays This operation can be used to trigger computation on underlying dask arrays, similar to ``.compute()`` or ``.load()``. However this @@ -1018,7 +1015,8 @@ def copy(self, deep: bool = False, data: Mapping = None) -> "Dataset": >>> da = xr.DataArray(np.random.randn(2, 3)) >>> ds = xr.Dataset( - ... {"foo": da, "bar": ("x", [-1, 2])}, coords={"x": ["one", "two"]}, + ... 
{"foo": da, "bar": ("x", [-1, 2])}, + ... coords={"x": ["one", "two"]}, ... ) >>> ds.copy() @@ -1158,8 +1156,7 @@ def _copy_listed(self, names: Iterable[Hashable]) -> "Dataset": return self._replace(variables, coord_names, dims, indexes=indexes) def _construct_dataarray(self, name: Hashable) -> "DataArray": - """Construct a DataArray by indexing this dataset - """ + """Construct a DataArray by indexing this dataset""" from .dataarray import DataArray try: @@ -1193,14 +1190,12 @@ def __deepcopy__(self, memo=None) -> "Dataset": @property def _attr_sources(self) -> List[Mapping[Hashable, Any]]: - """List of places to look-up items for attribute-style access - """ + """List of places to look-up items for attribute-style access""" return self._item_sources + [self.attrs] @property def _item_sources(self) -> List[Mapping[Hashable, Any]]: - """List of places to look-up items for key-completion - """ + """List of places to look-up items for key-completion""" return [ self.data_vars, self.coords, @@ -1288,8 +1283,7 @@ def __setitem__(self, key: Hashable, value) -> None: self.update({key: value}) def __delitem__(self, key: Hashable) -> None: - """Remove a variable from this dataset. - """ + """Remove a variable from this dataset.""" del self._variables[key] self._coord_names.discard(key) if key in self.indexes: @@ -1302,8 +1296,7 @@ def __delitem__(self, key: Hashable) -> None: __hash__ = None # type: ignore def _all_compat(self, other: "Dataset", compat_str: str) -> bool: - """Helper function for equals and identical - """ + """Helper function for equals and identical""" # some stores (e.g., scipy) do not seem to preserve order, so don't # require matching order for equality @@ -1370,8 +1363,7 @@ def identical(self, other: "Dataset") -> bool: @property def indexes(self) -> Indexes: - """Mapping of pandas.Index objects used for label based indexing - """ + """Mapping of pandas.Index objects used for label based indexing""" if self._indexes is None: self._indexes = default_indexes(self._variables, self._dims) return Indexes(self._indexes) @@ -1385,8 +1377,7 @@ def coords(self) -> DatasetCoordinates: @property def data_vars(self) -> DataVariables: - """Dictionary of DataArray objects corresponding to data variables - """ + """Dictionary of DataArray objects corresponding to data variables""" return DataVariables(self) def set_coords( @@ -1464,8 +1455,7 @@ def reset_coords( return obj def dump_to_store(self, store: "AbstractDataStore", **kwargs) -> None: - """Store dataset contents to a backends.*DataStore object. 
- """ + """Store dataset contents to a backends.*DataStore object.""" from ..backends.api import dump_to_store # TODO: rename and/or cleanup this method to make it more consistent @@ -1800,7 +1790,7 @@ def maybe_chunk(name, var, chunks): def _validate_indexers( self, indexers: Mapping[Hashable, Any], missing_dims: str = "raise" ) -> Iterator[Tuple[Hashable, Union[int, slice, np.ndarray, Variable]]]: - """ Here we make sure + """Here we make sure + indexer has a valid keys + indexer is in a valid data type + string indexers are cast to the appropriate date type if the @@ -1842,8 +1832,7 @@ def _validate_indexers( def _validate_interp_indexers( self, indexers: Mapping[Hashable, Any] ) -> Iterator[Tuple[Hashable, Variable]]: - """Variant of _validate_indexers to be used for interpolation - """ + """Variant of _validate_indexers to be used for interpolation""" for k, v in self._validate_indexers(indexers): if isinstance(v, Variable): if v.ndim == 1: @@ -2597,7 +2586,7 @@ def interp( kwargs: Mapping[str, Any] = None, **coords_kwargs: Any, ) -> "Dataset": - """ Multidimensional interpolation of Dataset. + """Multidimensional interpolation of Dataset. Parameters ---------- @@ -4879,7 +4868,7 @@ def from_dict(cls, d): "attrs": {"title": "air temperature"}, "dims": "t", "data_vars": { - "a": {"dims": "t", "data": x,}, + "a": {"dims": "t", "data": x}, "b": {"dims": "t", "data": y}, }, } @@ -5788,7 +5777,7 @@ def filter_by_attrs(self, **kwargs): return self[selection] def unify_chunks(self) -> "Dataset": - """ Unify chunk size along all chunked dimensions of this Dataset. + """Unify chunk size along all chunked dimensions of this Dataset. Returns ------- @@ -5925,7 +5914,9 @@ def map_blocks( to the function being applied in ``xr.map_blocks()``: >>> ds.map_blocks( - ... calculate_anomaly, kwargs={"groupby_type": "time.year"}, template=ds, + ... calculate_anomaly, + ... kwargs={"groupby_type": "time.year"}, + ... template=ds, ... ) Dimensions: (time: 24) @@ -6528,7 +6519,7 @@ def argmin(self, dim=None, axis=None, **kwargs): -------- DataArray.argmin - """ + """ if dim is None and axis is None: warnings.warn( "Once the behaviour of DataArray.argmin() and Variable.argmin() with " @@ -6591,7 +6582,7 @@ def argmax(self, dim=None, axis=None, **kwargs): -------- DataArray.argmax - """ + """ if dim is None and axis is None: warnings.warn( "Once the behaviour of DataArray.argmax() and Variable.argmax() with " diff --git a/xarray/core/dtypes.py b/xarray/core/dtypes.py index 4db2990accc..167f00fa932 100644 --- a/xarray/core/dtypes.py +++ b/xarray/core/dtypes.py @@ -137,8 +137,7 @@ def get_neg_infinity(dtype): def is_datetime_like(dtype): - """Check if a dtype is a subclass of the numpy datetime types - """ + """Check if a dtype is a subclass of the numpy datetime types""" return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64) diff --git a/xarray/core/duck_array_ops.py b/xarray/core/duck_array_ops.py index e64fea2ccf0..16bdd0e0fa6 100644 --- a/xarray/core/duck_array_ops.py +++ b/xarray/core/duck_array_ops.py @@ -200,10 +200,10 @@ def as_shared_dtype(scalars_or_arrays): def lazy_array_equiv(arr1, arr2): """Like array_equal, but doesn't actually compare values. - Returns True when arr1, arr2 identical or their dask names are equal. - Returns False when shapes are not equal. - Returns None when equality cannot determined: one or both of arr1, arr2 are numpy arrays; - or their dask names are not equal + Returns True when arr1, arr2 identical or their dask names are equal. 
+ Returns False when shapes are not equal. + Returns None when equality cannot determined: one or both of arr1, arr2 are numpy arrays; + or their dask names are not equal """ if arr1 is arr2: return True @@ -225,8 +225,7 @@ def lazy_array_equiv(arr1, arr2): def allclose_or_equiv(arr1, arr2, rtol=1e-5, atol=1e-8): - """Like np.allclose, but also allows values to be NaN in both arrays - """ + """Like np.allclose, but also allows values to be NaN in both arrays""" arr1 = asarray(arr1) arr2 = asarray(arr2) @@ -238,8 +237,7 @@ def allclose_or_equiv(arr1, arr2, rtol=1e-5, atol=1e-8): def array_equiv(arr1, arr2): - """Like np.array_equal, but also allows values to be NaN in both arrays - """ + """Like np.array_equal, but also allows values to be NaN in both arrays""" arr1 = asarray(arr1) arr2 = asarray(arr2) lazy_equiv = lazy_array_equiv(arr1, arr2) @@ -269,8 +267,7 @@ def array_notnull_equiv(arr1, arr2): def count(data, axis=None): - """Count the number of non-NA in this array along the given axis or axes - """ + """Count the number of non-NA in this array along the given axis or axes""" return np.sum(np.logical_not(isnull(data)), axis=axis) @@ -523,8 +520,7 @@ def pd_timedelta_to_float(value, datetime_unit): def py_timedelta_to_float(array, datetime_unit): - """Convert a timedelta object to a float, possibly at a loss of resolution. - """ + """Convert a timedelta object to a float, possibly at a loss of resolution.""" array = np.asarray(array) array = np.reshape([a.total_seconds() for a in array.ravel()], array.shape) * 1e6 conversion_factor = np.timedelta64(1, "us") / np.timedelta64(1, datetime_unit) @@ -596,8 +592,7 @@ def cumsum(array, axis=None, **kwargs): def first(values, axis, skipna=None): - """Return the first non-NA elements in this array along the given axis - """ + """Return the first non-NA elements in this array along the given axis""" if (skipna or skipna is None) and values.dtype.kind not in "iSU": # only bother for dtypes that can hold NaN _fail_on_dask_array_input_skipna(values) @@ -606,8 +601,7 @@ def first(values, axis, skipna=None): def last(values, axis, skipna=None): - """Return the last non-NA elements in this array along the given axis - """ + """Return the last non-NA elements in this array along the given axis""" if (skipna or skipna is None) and values.dtype.kind not in "iSU": # only bother for dtypes that can hold NaN _fail_on_dask_array_input_skipna(values) @@ -627,8 +621,7 @@ def rolling_window(array, axis, window, center, fill_value): def least_squares(lhs, rhs, rcond=None, skipna=False): - """Return the coefficients and residuals of a least-squares fit. 
- """ + """Return the coefficients and residuals of a least-squares fit.""" if isinstance(rhs, dask_array_type): return dask_array_ops.least_squares(lhs, rhs, rcond=rcond, skipna=skipna) else: diff --git a/xarray/core/formatting_html.py b/xarray/core/formatting_html.py index ad72b2d7945..3392aef8da3 100644 --- a/xarray/core/formatting_html.py +++ b/xarray/core/formatting_html.py @@ -12,8 +12,7 @@ @lru_cache(None) def _load_static_files(): - """Lazily load the resource files into memory the first time they are needed - """ + """Lazily load the resource files into memory the first time they are needed""" return [ pkg_resources.resource_string("xarray", fname).decode("utf8") for fname in STATIC_FILES diff --git a/xarray/core/groupby.py b/xarray/core/groupby.py index 8fb343a97bf..a5d96bc66cc 100644 --- a/xarray/core/groupby.py +++ b/xarray/core/groupby.py @@ -102,8 +102,7 @@ def _is_one_or_none(obj): def _consolidate_slices(slices): - """Consolidate adjacent slices in a list of slices. - """ + """Consolidate adjacent slices in a list of slices.""" result = [] last_slice = slice(None) for slice_ in slices: @@ -688,13 +687,11 @@ def _first_or_last(self, op, skipna, keep_attrs): return self.reduce(op, self._group_dim, skipna=skipna, keep_attrs=keep_attrs) def first(self, skipna=None, keep_attrs=None): - """Return the first element of each group along the group dimension - """ + """Return the first element of each group along the group dimension""" return self._first_or_last(duck_array_ops.first, skipna, keep_attrs) def last(self, skipna=None, keep_attrs=None): - """Return the last element of each group along the group dimension - """ + """Return the last element of each group along the group dimension""" return self._first_or_last(duck_array_ops.last, skipna, keep_attrs) def assign_coords(self, coords=None, **coords_kwargs): @@ -719,8 +716,7 @@ def _maybe_reorder(xarray_obj, dim, positions): class DataArrayGroupBy(GroupBy, ImplementsArrayReduce): - """GroupBy object specialized to grouping DataArray objects - """ + """GroupBy object specialized to grouping DataArray objects""" def _iter_grouped_shortcut(self): """Fast version of `_iter_grouped` that yields Variables without diff --git a/xarray/core/indexes.py b/xarray/core/indexes.py index 6b7220fdfd4..84cf35d3b4f 100644 --- a/xarray/core/indexes.py +++ b/xarray/core/indexes.py @@ -130,8 +130,7 @@ def roll_index(index: pd.Index, count: int, axis: int = 0) -> pd.Index: def propagate_indexes( indexes: Optional[Dict[Hashable, pd.Index]], exclude: Optional[Any] = None ) -> Optional[Dict[Hashable, pd.Index]]: - """ Creates new indexes dict from existing dict optionally excluding some dimensions. - """ + """Creates new indexes dict from existing dict optionally excluding some dimensions.""" if exclude is None: exclude = () diff --git a/xarray/core/indexing.py b/xarray/core/indexing.py index 28ed2cfb16f..68c61ac13dd 100644 --- a/xarray/core/indexing.py +++ b/xarray/core/indexing.py @@ -464,8 +464,7 @@ def __init__(self, key): class ExplicitlyIndexed: - """Mixin to mark support for Indexer subclasses in indexing. - """ + """Mixin to mark support for Indexer subclasses in indexing.""" __slots__ = () @@ -502,8 +501,7 @@ def __getitem__(self, key): class LazilyOuterIndexedArray(ExplicitlyIndexedNDArrayMixin): - """Wrap an array to make basic and outer indexing lazy. 
- """ + """Wrap an array to make basic and outer indexing lazy.""" __slots__ = ("array", "key") @@ -579,8 +577,7 @@ def __repr__(self): class LazilyVectorizedIndexedArray(ExplicitlyIndexedNDArrayMixin): - """Wrap an array to make vectorized indexing lazy. - """ + """Wrap an array to make vectorized indexing lazy.""" __slots__ = ("array", "key") @@ -767,7 +764,7 @@ def _outer_to_numpy_indexer(key, shape): def _combine_indexers(old_key, shape, new_key): - """ Combine two indexers. + """Combine two indexers. Parameters ---------- @@ -852,7 +849,7 @@ def decompose_indexer( def _decompose_slice(key, size): - """ convert a slice to successive two slices. The first slice always has + """convert a slice to successive two slices. The first slice always has a positive step. """ start, stop, step = key.indices(size) @@ -1308,7 +1305,7 @@ class DaskIndexingAdapter(ExplicitlyIndexedNDArrayMixin): __slots__ = ("array",) def __init__(self, array): - """ This adapter is created in Variable.__getitem__ in + """This adapter is created in Variable.__getitem__ in Variable._broadcast_indexes. """ self.array = array @@ -1363,8 +1360,7 @@ def transpose(self, order): class PandasIndexAdapter(ExplicitlyIndexedNDArrayMixin): - """Wrap a pandas.Index to preserve dtypes and handle explicit indexing. - """ + """Wrap a pandas.Index to preserve dtypes and handle explicit indexing.""" __slots__ = ("array", "_dtype") diff --git a/xarray/core/merge.py b/xarray/core/merge.py index 08931bcc787..8a0ebfd35c2 100644 --- a/xarray/core/merge.py +++ b/xarray/core/merge.py @@ -56,7 +56,9 @@ ) -def broadcast_dimension_size(variables: List[Variable],) -> Dict[Hashable, int]: +def broadcast_dimension_size( + variables: List[Variable], +) -> Dict[Hashable, int]: """Extract dimension sizes from a dictionary of variables. Raises ValueError if any dimensions have different sizes. @@ -71,8 +73,7 @@ def broadcast_dimension_size(variables: List[Variable],) -> Dict[Hashable, int]: class MergeError(ValueError): - """Error class for merge failures due to incompatible arguments. - """ + """Error class for merge failures due to incompatible arguments.""" # inherits from ValueError for backward compatibility # TODO: move this to an xarray.exceptions module? @@ -494,8 +495,7 @@ def assert_valid_explicit_coords(variables, dims, explicit_coords): def merge_attrs(variable_attrs, combine_attrs): - """Combine attributes from different variables according to combine_attrs - """ + """Combine attributes from different variables according to combine_attrs""" if not variable_attrs: # no attributes to merge return None @@ -875,8 +875,7 @@ def dataset_merge_method( join: str, fill_value: Any, ) -> _MergeResult: - """Guts of the Dataset.merge method. - """ + """Guts of the Dataset.merge method.""" # we are locked into supporting overwrite_vars for the Dataset.merge # method due for backwards compatibility # TODO: consider deprecating it? 
diff --git a/xarray/core/missing.py b/xarray/core/missing.py index 8c44ade7df7..7a5ffa48f77 100644 --- a/xarray/core/missing.py +++ b/xarray/core/missing.py @@ -45,8 +45,7 @@ def _get_nan_block_lengths(obj, dim: Hashable, index: Variable): class BaseInterpolator: - """Generic interpolator class for normalizing interpolation methods - """ + """Generic interpolator class for normalizing interpolation methods""" cons_kwargs: Dict[str, Any] call_kwargs: Dict[str, Any] @@ -196,8 +195,7 @@ def __init__( def _apply_over_vars_with_dim(func, self, dim=None, **kwargs): - """Wrapper for datasets - """ + """Wrapper for datasets""" ds = type(self)(coords=self.coords, attrs=self.attrs) for name, var in self.data_vars.items(): @@ -304,8 +302,7 @@ def interp_na( keep_attrs: bool = None, **kwargs, ): - """Interpolate values according to different methods. - """ + """Interpolate values according to different methods.""" from xarray.coding.cftimeindex import CFTimeIndex if dim is None: @@ -546,7 +543,7 @@ def _get_valid_fill_mask(arr, dim, limit): def _localize(var, indexes_coords): - """ Speed up for linear and nearest neighbor method. + """Speed up for linear and nearest neighbor method. Only consider a subspace that is needed for the interpolation """ indexes = {} @@ -571,7 +568,7 @@ def _localize(var, indexes_coords): def _floatize_x(x, new_x): - """ Make x and new_x float. + """Make x and new_x float. This is particulary useful for datetime dtype. x, new_x: tuple of np.ndarray """ @@ -591,7 +588,7 @@ def _floatize_x(x, new_x): def interp(var, indexes_coords, method, **kwargs): - """ Make an interpolation of Variable + """Make an interpolation of Variable Parameters ---------- diff --git a/xarray/core/nanops.py b/xarray/core/nanops.py index bc7dc510817..94af65ae723 100644 --- a/xarray/core/nanops.py +++ b/xarray/core/nanops.py @@ -43,7 +43,7 @@ def _maybe_null_out(result, axis, mask, min_count=1): def _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs): - """ In house nanargmin, nanargmax for object arrays. Always return integer + """In house nanargmin, nanargmax for object arrays. Always return integer type """ valid_count = count(value, axis=axis) diff --git a/xarray/core/nputils.py b/xarray/core/nputils.py index b56172a240e..bae94121d42 100644 --- a/xarray/core/nputils.py +++ b/xarray/core/nputils.py @@ -90,8 +90,7 @@ def _is_contiguous(positions): def _advanced_indexer_subspaces(key): - """Indices of the advanced indexes subspaces for mixed indexing and vindex. - """ + """Indices of the advanced indexes subspaces for mixed indexing and vindex.""" if not isinstance(key, tuple): key = (key,) advanced_index_positions = [ diff --git a/xarray/core/parallel.py b/xarray/core/parallel.py index 74a02015ce5..56e598d5135 100644 --- a/xarray/core/parallel.py +++ b/xarray/core/parallel.py @@ -124,8 +124,7 @@ def make_meta(obj): def infer_template( func: Callable[..., T_DSorDA], obj: Union[DataArray, Dataset], *args, **kwargs ) -> T_DSorDA: - """Infer return object by running the function on meta objects. - """ + """Infer return object by running the function on meta objects.""" meta_args = [make_meta(arg) for arg in (obj,) + args] try: @@ -257,14 +256,16 @@ def map_blocks( to the function being applied in ``xr.map_blocks()``: >>> array.map_blocks( - ... calculate_anomaly, kwargs={"groupby_type": "time.year"}, template=array, + ... calculate_anomaly, + ... kwargs={"groupby_type": "time.year"}, + ... template=array, ... 
) # doctest: +ELLIPSIS dask.array Coordinates: * time (time) object 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 dask.array - """ + """ def _wrapper( func: Callable, diff --git a/xarray/core/resample.py b/xarray/core/resample.py index af9711a3cc3..a00dedc8d05 100644 --- a/xarray/core/resample.py +++ b/xarray/core/resample.py @@ -253,8 +253,7 @@ def apply(self, func, args=(), shortcut=None, **kwargs): class DatasetResample(DatasetGroupBy, Resample): - """DatasetGroupBy object specialized to resampling a specified dimension - """ + """DatasetGroupBy object specialized to resampling a specified dimension""" def __init__(self, *args, dim=None, resample_dim=None, **kwargs): diff --git a/xarray/core/resample_cftime.py b/xarray/core/resample_cftime.py index cfac224363d..882664cbb60 100644 --- a/xarray/core/resample_cftime.py +++ b/xarray/core/resample_cftime.py @@ -224,7 +224,7 @@ def _adjust_bin_edges(datetime_bins, offset, closed, index, labels): def _get_range_edges(first, last, offset, closed="left", base=0): - """ Get the correct starting and ending datetimes for the resampled + """Get the correct starting and ending datetimes for the resampled CFTimeIndex range. Parameters @@ -272,7 +272,7 @@ def _get_range_edges(first, last, offset, closed="left", base=0): def _adjust_dates_anchored(first, last, offset, closed="right", base=0): - """ First and last offsets should be calculated from the start day to fix + """First and last offsets should be calculated from the start day to fix an error cause by resampling across multiple days when a one day period is not a multiple of the frequency. See https://github.com/pandas-dev/pandas/issues/8683 diff --git a/xarray/core/utils.py b/xarray/core/utils.py index ac060215848..0952d185f85 100644 --- a/xarray/core/utils.py +++ b/xarray/core/utils.py @@ -304,16 +304,14 @@ def is_valid_numpy_dtype(dtype: Any) -> bool: def to_0d_object_array(value: Any) -> np.ndarray: - """Given a value, wrap it in a 0-D numpy.ndarray with dtype=object. - """ + """Given a value, wrap it in a 0-D numpy.ndarray with dtype=object.""" result = np.empty((), dtype=object) result[()] = value return result def to_0d_array(value: Any) -> np.ndarray: - """Given a value, wrap it in a 0-D numpy.ndarray. - """ + """Given a value, wrap it in a 0-D numpy.ndarray.""" if np.isscalar(value) or (isinstance(value, np.ndarray) and value.ndim == 0): return np.array(value) else: @@ -566,8 +564,7 @@ def __repr__(self: Any) -> str: class ReprObject: - """Object that prints as the given value, for use with sentinel values. - """ + """Object that prints as the given value, for use with sentinel values.""" __slots__ = ("_value",) @@ -628,8 +625,7 @@ def is_uniform_spaced(arr, **kwargs) -> bool: def hashable(v: Any) -> bool: - """Determine whether `v` can be hashed. - """ + """Determine whether `v` can be hashed.""" try: hash(v) except TypeError: @@ -665,8 +661,7 @@ def ensure_us_time_resolution(val): class HiddenKeyDict(MutableMapping[K, V]): - """Acts like a normal dictionary, but hides certain keys. - """ + """Acts like a normal dictionary, but hides certain keys.""" __slots__ = ("_data", "_hidden_keys") @@ -728,7 +723,7 @@ def infix_dims(dims_supplied: Collection, dims_all: Collection) -> Iterator: def get_temp_dimname(dims: Container[Hashable], new_dim: Hashable) -> Hashable: - """ Get an new dimension name based on new_dim, that is not used in dims. + """Get an new dimension name based on new_dim, that is not used in dims. If the same name exists, we add an underscore(s) in the head. 
Example1: @@ -750,7 +745,7 @@ def drop_dims_from_indexers( dims: Union[list, Mapping[Hashable, int]], missing_dims: str, ) -> Mapping[Hashable, Any]: - """ Depending on the setting of missing_dims, drop any dimensions from indexers that + """Depending on the setting of missing_dims, drop any dimensions from indexers that are not present in dims. Parameters @@ -794,7 +789,7 @@ def drop_dims_from_indexers( class UncachedAccessor: - """ Acts like a property, but on both classes and class instances + """Acts like a property, but on both classes and class instances This class is necessary because some tools (e.g. pydoc and sphinx) inspect classes for which property returns itself and not the diff --git a/xarray/core/variable.py b/xarray/core/variable.py index 7c7a635e347..7c398066830 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -45,7 +45,12 @@ ) NON_NUMPY_SUPPORTED_ARRAY_TYPES = ( - (indexing.ExplicitlyIndexed, pd.Index,) + dask_array_type + cupy_array_type + ( + indexing.ExplicitlyIndexed, + pd.Index, + ) + + dask_array_type + + cupy_array_type ) # https://github.com/python/mypy/issues/224 BASIC_INDEXING_TYPES = integer_types + (slice,) # type: ignore @@ -64,8 +69,7 @@ def f(self: VariableType, ...) -> VariableType: class MissingDimensionsError(ValueError): - """Error class used when we can't safely guess a dimension name. - """ + """Error class used when we can't safely guess a dimension name.""" # inherits from ValueError for backward compatibility # TODO: move this to an xarray.exceptions module? @@ -539,8 +543,7 @@ def to_dict(self, data=True): @property def dims(self): - """Tuple of dimension names with which this variable is associated. - """ + """Tuple of dimension names with which this variable is associated.""" return self._dims @dims.setter @@ -766,8 +769,7 @@ def __getitem__(self: VariableType, key) -> VariableType: return self._finalize_indexing_result(dims, data) def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType: - """Used by IndexVariable to return IndexVariable objects when possible. - """ + """Used by IndexVariable to return IndexVariable objects when possible.""" return type(self)(dims, data, self._attrs, self._encoding, fastpath=True) def _getitem_with_mask(self, key, fill_value=dtypes.NA): @@ -843,8 +845,7 @@ def __setitem__(self, key, value): @property def attrs(self) -> Dict[Hashable, Any]: - """Dictionary of local attributes on this variable. - """ + """Dictionary of local attributes on this variable.""" if self._attrs is None: self._attrs = {} return self._attrs @@ -855,8 +856,7 @@ def attrs(self, value: Mapping[Hashable, Any]) -> None: @property def encoding(self): - """Dictionary of encodings on this variable. - """ + """Dictionary of encodings on this variable.""" if self._encoding is None: self._encoding = {} return self._encoding @@ -1768,8 +1768,7 @@ def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv): return self.equals(other, equiv=equiv) def identical(self, other, equiv=duck_array_ops.array_equiv): - """Like equals, but also checks attributes. 
- """ + """Like equals, but also checks attributes.""" try: return utils.dict_equiv(self.attrs, other.attrs) and self.equals( other, equiv=equiv @@ -2131,7 +2130,7 @@ def func(self, other): return func def _to_numeric(self, offset=None, datetime_unit=None, dtype=float): - """ A (private) method to convert datetime array to numeric dtype + """A (private) method to convert datetime array to numeric dtype See duck_array_ops.datetime_to_numeric """ numeric_array = duck_array_ops.datetime_to_numeric( diff --git a/xarray/core/weighted.py b/xarray/core/weighted.py index fa143342c06..96b4c79f245 100644 --- a/xarray/core/weighted.py +++ b/xarray/core/weighted.py @@ -118,7 +118,7 @@ def _reduce( ) -> "DataArray": """reduce using dot; equivalent to (da * weights).sum(dim, skipna) - for internal use only + for internal use only """ # need to infer dims as we use `dot` diff --git a/xarray/plot/facetgrid.py b/xarray/plot/facetgrid.py index 5cc187c909d..7860a082ab2 100644 --- a/xarray/plot/facetgrid.py +++ b/xarray/plot/facetgrid.py @@ -410,8 +410,7 @@ def add_legend(self, **kwargs): self.fig.subplots_adjust(right=right) def add_colorbar(self, **kwargs): - """Draw a colorbar - """ + """Draw a colorbar""" kwargs = kwargs.copy() if self._cmap_extend is not None: kwargs.setdefault("extend", self._cmap_extend) diff --git a/xarray/plot/utils.py b/xarray/plot/utils.py index 7454b464c64..6cd44dbc098 100644 --- a/xarray/plot/utils.py +++ b/xarray/plot/utils.py @@ -446,8 +446,8 @@ def get_axis(figsize=None, size=None, aspect=None, ax=None, **kwargs): def label_from_attrs(da, extra=""): - """ Makes informative labels if variable metadata (attrs) follows - CF conventions. """ + """Makes informative labels if variable metadata (attrs) follows + CF conventions.""" if da.attrs.get("long_name"): name = da.attrs["long_name"] diff --git a/xarray/testing.py b/xarray/testing.py index ec479ef09d4..4f5d9037a7d 100644 --- a/xarray/testing.py +++ b/xarray/testing.py @@ -322,7 +322,9 @@ def _assert_dataset_invariants(ds: Dataset): assert isinstance(ds._attrs, (type(None), dict)) -def _assert_internal_invariants(xarray_obj: Union[DataArray, Dataset, Variable],): +def _assert_internal_invariants( + xarray_obj: Union[DataArray, Dataset, Variable], +): """Validate that an xarray object satisfies its own internal invariants. 
This exists for the benefit of xarray's own test suite, but may be useful diff --git a/xarray/tests/test_computation.py b/xarray/tests/test_computation.py index 5df783e4878..63655464a45 100644 --- a/xarray/tests/test_computation.py +++ b/xarray/tests/test_computation.py @@ -473,10 +473,13 @@ def test_unified_dim_sizes(): "x": 1, "y": 2, } - assert unified_dim_sizes( - [xr.Variable(("x", "z"), [[1]]), xr.Variable(("y", "z"), [[1, 2], [3, 4]])], - exclude_dims={"z"}, - ) == {"x": 1, "y": 2} + assert ( + unified_dim_sizes( + [xr.Variable(("x", "z"), [[1]]), xr.Variable(("y", "z"), [[1, 2], [3, 4]])], + exclude_dims={"z"}, + ) + == {"x": 1, "y": 2} + ) # duplicate dimensions with pytest.raises(ValueError): @@ -870,7 +873,10 @@ def test_vectorize_dask_dtype_without_output_dtypes(data_array): expected = data_array.copy() actual = apply_ufunc( - identity, data_array.chunk({"x": 1}), vectorize=True, dask="parallelized", + identity, + data_array.chunk({"x": 1}), + vectorize=True, + dask="parallelized", ) assert_identical(expected, actual) @@ -1073,7 +1079,8 @@ def np_corr(ts1, ts2): @pytest.mark.parametrize( - "da_a, da_b", arrays_w_tuples()[1], + "da_a, da_b", + arrays_w_tuples()[1], ) @pytest.mark.parametrize("dim", [None, "time", "x"]) def test_covcorr_consistency(da_a, da_b, dim): @@ -1093,7 +1100,8 @@ def test_covcorr_consistency(da_a, da_b, dim): @pytest.mark.parametrize( - "da_a", arrays_w_tuples()[0], + "da_a", + arrays_w_tuples()[0], ) @pytest.mark.parametrize("dim", [None, "time", "x", ["time", "x"]]) def test_autocov(da_a, dim): diff --git a/xarray/tests/test_dask.py b/xarray/tests/test_dask.py index caeb7ad4dc8..358ea731b90 100644 --- a/xarray/tests/test_dask.py +++ b/xarray/tests/test_dask.py @@ -36,9 +36,9 @@ class CountingScheduler: - """ Simple dask scheduler counting the number of computes. + """Simple dask scheduler counting the number of computes. - Reference: https://stackoverflow.com/questions/53289286/ """ + Reference: https://stackoverflow.com/questions/53289286/""" def __init__(self, max_computes=0): self.total_computes = 0 diff --git a/xarray/tests/test_duck_array_ops.py b/xarray/tests/test_duck_array_ops.py index 6db0b6eef87..bdbf35f4e14 100644 --- a/xarray/tests/test_duck_array_ops.py +++ b/xarray/tests/test_duck_array_ops.py @@ -257,7 +257,7 @@ def from_series_or_scalar(se): def series_reduce(da, func, dim, **kwargs): - """ convert DataArray to pd.Series, apply pd.func, then convert back to + """convert DataArray to pd.Series, apply pd.func, then convert back to a DataArray. 
Multiple dims cannot be specified.""" if dim is None or da.ndim == 1: se = da.to_series() diff --git a/xarray/tests/test_interp.py b/xarray/tests/test_interp.py index ce270fbf6a4..9851f2cddce 100644 --- a/xarray/tests/test_interp.py +++ b/xarray/tests/test_interp.py @@ -845,13 +845,15 @@ def test_interpolate_chunk_advanced(method): theta = np.linspace(0, 2 * np.pi, 5) w = np.linspace(-0.25, 0.25, 7) r = xr.DataArray( - data=1 + w[:, np.newaxis] * np.cos(theta), coords=[("w", w), ("theta", theta)], + data=1 + w[:, np.newaxis] * np.cos(theta), + coords=[("w", w), ("theta", theta)], ) x = r * np.cos(theta) y = r * np.sin(theta) z = xr.DataArray( - data=w[:, np.newaxis] * np.sin(theta), coords=[("w", w), ("theta", theta)], + data=w[:, np.newaxis] * np.sin(theta), + coords=[("w", w), ("theta", theta)], ) kwargs = {"fill_value": None} diff --git a/xarray/tests/test_plot.py b/xarray/tests/test_plot.py index 615c31f9d21..1dfbb97d012 100644 --- a/xarray/tests/test_plot.py +++ b/xarray/tests/test_plot.py @@ -59,10 +59,10 @@ def figure_context(*args, **kwargs): def test_all_figures_closed(): """meta-test to ensure all figures are closed at the end of a test - Notes: Scope is kept to module (only invoke this function once per test - module) else tests cannot be run in parallel (locally). Disadvantage: only - catches one open figure per run. May still give a false positive if tests - are run in parallel. + Notes: Scope is kept to module (only invoke this function once per test + module) else tests cannot be run in parallel (locally). Disadvantage: only + catches one open figure per run. May still give a false positive if tests + are run in parallel. """ yield None diff --git a/xarray/tests/test_units.py b/xarray/tests/test_units.py index 619fa10116d..525c1e8fc33 100644 --- a/xarray/tests/test_units.py +++ b/xarray/tests/test_units.py @@ -275,7 +275,7 @@ def merge_args(default_args, new_args): class method: - """ wrapper class to help with passing methods via parametrize + """wrapper class to help with passing methods via parametrize This is works a bit similar to using `partial(Class.method, arg, kwarg)` """ @@ -325,7 +325,7 @@ def __repr__(self): class function: - """ wrapper class for numpy functions + """wrapper class for numpy functions Same as method, but the name is used for referencing numpy functions """ @@ -624,7 +624,9 @@ def test_align_dataset(value, unit, variant, error, dtype): units_a = extract_units(ds1) units_b = extract_units(ds2) expected_a, expected_b = func( - strip_units(ds1), strip_units(convert_units(ds2, units_a)), **stripped_kwargs, + strip_units(ds1), + strip_units(convert_units(ds2, units_a)), + **stripped_kwargs, ) expected_a = attach_units(expected_a, units_a) if isinstance(array2, Quantity): @@ -1735,7 +1737,10 @@ def test_missing_value_fillna(self, unit, error): pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.s, id="incompatible_unit"), - pytest.param(unit_registry.cm, id="compatible_unit",), + pytest.param( + unit_registry.cm, + id="compatible_unit", + ), pytest.param(unit_registry.m, id="identical_unit"), ), ) @@ -2186,7 +2191,8 @@ def test_pad(self, mode, xr_arg, np_arg): v = xr.Variable(["x", "y", "z"], data) expected = attach_units( - strip_units(v).pad(mode=mode, **xr_arg), extract_units(v), + strip_units(v).pad(mode=mode, **xr_arg), + extract_units(v), ) actual = v.pad(mode=mode, **xr_arg) @@ -2918,8 +2924,16 @@ def test_interpolate_na(self): unit_registry.dimensionless, DimensionalityError, 
id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), - pytest.param(unit_registry.cm, None, id="compatible_unit",), - pytest.param(unit_registry.m, None, id="identical_unit",), + pytest.param( + unit_registry.cm, + None, + id="compatible_unit", + ), + pytest.param( + unit_registry.m, + None, + id="identical_unit", + ), ), ) def test_combine_first(self, unit, error, dtype): @@ -3471,7 +3485,9 @@ def test_interp_reindex(self, variant, func, dtype): ), ) @pytest.mark.parametrize( - "func", (method("interp"), method("reindex")), ids=repr, + "func", + (method("interp"), method("reindex")), + ids=repr, ) def test_interp_reindex_indexing(self, func, unit, error, dtype): array = np.linspace(1, 2, 10).astype(dtype) @@ -3545,7 +3561,9 @@ def test_interp_reindex_like(self, variant, func, dtype): ), ) @pytest.mark.parametrize( - "func", (method("interp_like"), method("reindex_like")), ids=repr, + "func", + (method("interp_like"), method("reindex_like")), + ids=repr, ) def test_interp_reindex_like_indexing(self, func, unit, error, dtype): array = np.linspace(1, 2, 10).astype(dtype) @@ -3927,7 +3945,8 @@ def test_init(self, shared, unit, error, dtype): ( "data", pytest.param( - "dims", marks=pytest.mark.xfail(reason="indexes don't support units"), + "dims", + marks=pytest.mark.xfail(reason="indexes don't support units"), ), "coords", ), @@ -4195,7 +4214,11 @@ def test_missing_value_filling(self, func, dtype): unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), - pytest.param(unit_registry.cm, None, id="compatible_unit",), + pytest.param( + unit_registry.cm, + None, + id="compatible_unit", + ), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) @@ -4340,7 +4363,10 @@ def test_where(self, variant, unit, error, dtype): for key, value in kwargs.items() } - expected = attach_units(strip_units(ds).where(**kwargs_without_units), units,) + expected = attach_units( + strip_units(ds).where(**kwargs_without_units), + units, + ) actual = ds.where(**kwargs) assert_units_equal(expected, actual) @@ -4359,7 +4385,10 @@ def test_interpolate_na(self, dtype): ds = xr.Dataset({"a": ("x", array1), "b": ("x", array2)}) units = extract_units(ds) - expected = attach_units(strip_units(ds).interpolate_na(dim="x"), units,) + expected = attach_units( + strip_units(ds).interpolate_na(dim="x"), + units, + ) actual = ds.interpolate_na(dim="x") assert_units_equal(expected, actual) @@ -4382,7 +4411,8 @@ def test_interpolate_na(self, dtype): ( "data", pytest.param( - "dims", marks=pytest.mark.xfail(reason="indexes don't support units"), + "dims", + marks=pytest.mark.xfail(reason="indexes don't support units"), ), ), ) @@ -4401,7 +4431,8 @@ def test_combine_first(self, variant, unit, error, dtype): ) x = np.arange(len(array1)) * dims_unit ds = xr.Dataset( - data_vars={"a": ("x", array1), "b": ("x", array2)}, coords={"x": x}, + data_vars={"a": ("x", array1), "b": ("x", array2)}, + coords={"x": x}, ) units = extract_units(ds) @@ -4478,7 +4509,8 @@ def test_comparisons(self, func, variant, unit, dtype): y = coord * coord_unit ds = xr.Dataset( - data_vars={"a": ("x", a), "b": ("x", b)}, coords={"x": x, "y": ("x", y)}, + data_vars={"a": ("x", a), "b": ("x", b)}, + coords={"x": x, "y": ("x", y)}, ) units = extract_units(ds) @@ -4535,7 +4567,8 @@ def test_comparisons(self, func, variant, unit, dtype): ( "data", pytest.param( - "dims", marks=pytest.mark.xfail(reason="indexes don't support 
units"), + "dims", + marks=pytest.mark.xfail(reason="indexes don't support units"), ), ), ) @@ -4626,7 +4659,8 @@ def test_pad(self, dtype): ( "data", pytest.param( - "dims", marks=pytest.mark.xfail(reason="indexes don't support units"), + "dims", + marks=pytest.mark.xfail(reason="indexes don't support units"), ), ), ) @@ -4677,7 +4711,10 @@ def test_to_stacked_array(self, dtype): func = method("to_stacked_array", "z", variable_dim="y", sample_dims=["x"]) actual = func(ds).rename(None) - expected = attach_units(func(strip_units(ds)).rename(None), units,) + expected = attach_units( + func(strip_units(ds)).rename(None), + units, + ) assert_units_equal(expected, actual) assert_equal(expected, actual) diff --git a/xarray/util/print_versions.py b/xarray/util/print_versions.py index 96983c83aab..d643d768093 100755 --- a/xarray/util/print_versions.py +++ b/xarray/util/print_versions.py @@ -78,7 +78,7 @@ def netcdf_and_hdf5_versions(): def show_versions(file=sys.stdout): - """ print the versions of xarray and its dependencies + """print the versions of xarray and its dependencies Parameters ----------
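
To reproduce the formatting pass recorded above: run black over the source tree and blackdoc over the docstrings. A minimal sketch, with the invocation targets and tool versions assumed (the patch pins neither; in the xarray repo both tools are typically driven through pre-commit, so a bare invocation may differ slightly in configuration):

    # Sketch of the formatting pass this patch records (assumed invocation;
    # the commit does not pin tool versions or paths).
    import subprocess

    # black reformats the Python source itself; the dominant effect in this
    # diff is one-line docstrings pulling their closing quotes onto the same
    # line as the text.
    subprocess.run(["black", "xarray/"], check=True)

    # blackdoc applies black to code embedded in docstrings, which is why the
    # doctest examples above trade single-line calls for exploded argument
    # lists with trailing commas.
    subprocess.run(["blackdoc", "xarray/"], check=True)

Both tools rewrite files in place; with check=True, subprocess.run raises if either exits non-zero.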