Bump black to 23.1.0 #9956

Merged
merged 1 commit on Feb 14, 2023
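Nearly every hunk below is mechanical reformatting produced by the new black release rather than a functional change. Two patterns account for most of the diff: a blank line sitting directly under a def ...: signature is removed, and redundant parentheses around tuple targets in for loops are dropped. A minimal before/after sketch of both patterns (the function itself is hypothetical; only the formatting behaviour is taken from the hunks below):

# Formatting accepted by black 22.10.0:
def pairwise_sums(pairs):

    totals = []
    for (left, right) in pairs:
        totals.append(left + right)
    return totals

# The same function after black 23.1.0: the blank line after the signature
# is removed and the parentheses around the for-loop target are dropped.
def pairwise_sums(pairs):
    totals = []
    for left, right in pairs:
        totals.append(left + right)
    return totals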
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -21,7 +21,7 @@ repos:
args:
- --py38-plus
- repo: https://github.com/psf/black
rev: 22.10.0
rev: 23.1.0
hooks:
- id: black
language_version: python3
1 change: 0 additions & 1 deletion dask/array/backends.py
@@ -152,7 +152,6 @@ def _cupy_einsum(*args, **kwargs):
@tensordot_lookup.register_lazy("cupyx")
@concatenate_lookup.register_lazy("cupyx")
def register_cupyx():

from cupyx.scipy.sparse import spmatrix

try:
2 changes: 1 addition & 1 deletion dask/array/core.py
@@ -5304,7 +5304,7 @@ def dtype(x):

result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))

for (idx, arr) in zip(
for idx, arr in zip(
slices_from_chunks(chunks), core.flatten(arrays, container=(list, tuple))
):
if hasattr(arr, "ndim"):
1 change: 0 additions & 1 deletion dask/array/optimization.py
@@ -340,7 +340,6 @@ def fuse_slice(a, b):
# If given two tuples walk through both, being mindful of uneven sizes
# and newaxes
if isinstance(a, tuple) and isinstance(b, tuple):

# Check for non-fusible cases with fancy-indexing
a_has_lists = any(isinstance(item, list) for item in a)
b_has_lists = any(isinstance(item, list) for item in b)
3 changes: 0 additions & 3 deletions dask/array/percentile.py
@@ -45,7 +45,6 @@ def _percentile(a, q, method="linear"):


def _tdigest_chunk(a):

from crick import TDigest

t = TDigest()
@@ -55,7 +54,6 @@ def _tdigest_chunk(a):


def _percentiles_from_tdigest(qs, digests):

from crick import TDigest

t = TDigest()
@@ -154,7 +152,6 @@ def percentile(a, q, method="linear", internal_method="default", **kwargs):
and method == "linear"
and (np.issubdtype(dtype, np.floating) or np.issubdtype(dtype, np.integer))
):

from dask.utils import import_required

import_required(
1 change: 0 additions & 1 deletion dask/array/routines.py
@@ -1948,7 +1948,6 @@ def squeeze(a, axis=None):

@derived_from(np)
def compress(condition, a, axis=None):

if not is_arraylike(condition):
# Allow `condition` to be anything array-like, otherwise ensure `condition`
# is a numpy array.
2 changes: 0 additions & 2 deletions dask/array/slicing.py
@@ -1889,7 +1889,6 @@ def value_indices_from_1d_int_index(dim, vsize, loc0, loc1):
dsk = {}
out_name = (out_name,)
for in_key, locations in zip(in_keys, array_locations):

# Now loop round each block dimension.
#
# If the block overlaps the indices then set the following
@@ -1922,7 +1921,6 @@ def value_indices_from_1d_int_index(dim, vsize, loc0, loc1):
dim_1d_int_index = None

for dim, (index, (loc0, loc1)) in enumerate(zip(indices, locations)):

integer_index = isinstance(index, int)
if isinstance(index, slice):
# Index is a slice
2 changes: 0 additions & 2 deletions dask/array/stats.py
@@ -150,7 +150,6 @@ def chisquare(f_obs, f_exp=None, ddof=0, axis=0):

@derived_from(scipy.stats)
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):

if isinstance(lambda_, str):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
@@ -405,7 +404,6 @@ def _unequal_var_ttest_denom(v1, n1, v2, n2):


def _ttest_ind_from_stats(mean1, mean2, denom, df):

d = mean1 - mean2
with np.errstate(divide="ignore", invalid="ignore"):
t = da.divide(d, denom)
6 changes: 1 addition & 5 deletions dask/array/tests/test_array_core.py
@@ -607,7 +607,6 @@ def test_concatenate_fixlen_strings():


def test_concatenate_zero_size():

x = np.random.random(10)
y = da.from_array(x, chunks=3)
result_np = np.concatenate([x, x[:0]])
@@ -2650,9 +2649,7 @@ def test_from_array_scalar(type_):
dx = da.from_array(x, chunks=-1)
assert_eq(np.array(x), dx)
assert isinstance(
dx.dask[
dx.name,
],
dx.dask[dx.name,],
np.ndarray,
)
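The hunk just above shows a third, rarer pattern: a single-element subscript that black 22.10.0 had split across lines because of its trailing comma is collapsed back onto one line by 23.1.0. A tiny sketch on a hypothetical dict keyed by a one-element tuple (the names are made up; the formatting change itself is what the hunk shows):

graph = {("x",): 42}

# black 22.10.0 honoured the magic trailing comma and expanded the subscript:
#     value = graph[
#         "x",
#     ]
# black 23.1.0 keeps the one-element subscript on a single line:
value = graph["x",]  # equivalent to graph[("x",)]
assert value == 42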

@@ -5332,7 +5329,6 @@ def test_chunk_non_array_like():
def test_to_backend():
# Test that `Array.to_backend` works as expected
with dask.config.set({"array.backend": "numpy"}):

# Start with numpy-backed array
x = da.ones(10)
assert isinstance(x._meta, np.ndarray)
1 change: 0 additions & 1 deletion dask/array/tests/test_creation.py
@@ -40,7 +40,6 @@ def test_arr_like(
):
backend_lib = pytest.importorskip(backend)
with dask.config.set({"array.backend": backend}):

np_func = getattr(backend_lib, funcname)
da_func = getattr(da, funcname)
shape = cast_shape(shape)
2 changes: 0 additions & 2 deletions dask/array/tests/test_cupy_creation.py
@@ -173,7 +173,6 @@ def test_tri_like(xp, N, M, k, dtype, chunks):
def test_to_backend_cupy():
# Test that `Array.to_backend` works as expected
with config.set({"array.backend": "numpy"}):

# Start with cupy-backed array
x = da.from_array(cupy.arange(11), chunks=(4,))
assert isinstance(x._meta, cupy.ndarray)
@@ -190,7 +189,6 @@ def test_to_backend_cupy():

# Change global "array.backend" config to `cupy`
with config.set({"array.backend": "cupy"}):

# Calling `to_backend("numpy")` should
# always move the data to `numpy`
x_new = x.to_backend("numpy")
1 change: 0 additions & 1 deletion dask/array/tests/test_linalg.py
@@ -815,7 +815,6 @@ def test_solve_assume_a(shape, chunk):

@pytest.mark.parametrize(("shape", "chunk"), [(20, 10), (12, 3), (30, 3), (30, 6)])
def test_cholesky(shape, chunk):

A = _get_symmat(shape)
dA = da.from_array(A, (chunk, chunk))
assert_eq(
2 changes: 1 addition & 1 deletion dask/array/tests/test_masked.py
@@ -171,7 +171,7 @@ def test_creation_functions():
dy = da.from_array(y, chunks=4)

sol = np.ma.masked_greater(x, y)
for (a, b) in product([dx, x], [dy, y]):
for a, b in product([dx, x], [dy, y]):
assert_eq(da.ma.masked_greater(a, b), sol)

# These are all the same as masked_greater, just check for correct op
1 change: 0 additions & 1 deletion dask/array/tests/test_optimization.py
@@ -106,7 +106,6 @@ def _assert_getter_dsk_eq(a, b):


def test_fuse_getitem(getter, getter_nofancy, getitem):

pairs = [
(
(getter, (getter, "x", slice(1000, 2000)), slice(15, 20)),
2 changes: 1 addition & 1 deletion dask/array/tests/test_random.py
@@ -274,7 +274,7 @@ def test_choice():
(4, [0.2, 0.2, 0.3]),
] # p must sum to 1

for (a, p) in errs:
for a, p in errs:
with pytest.raises(ValueError):
da.random.choice(a, size=size, chunks=chunks, p=p)

1 change: 0 additions & 1 deletion dask/array/tests/test_rechunk.py
@@ -539,7 +539,6 @@ def test_intersect_nan_single():


def test_intersect_nan_long():

old_chunks = (tuple([float("nan")] * 4), (10,))
new_chunks = (tuple([float("nan")] * 4), (5, 5))
result = list(intersect_chunks(old_chunks, new_chunks))
1 change: 0 additions & 1 deletion dask/array/tests/test_reductions.py
@@ -286,7 +286,6 @@ def test_nan_reduction_warnings(dfunc, func):
["dfunc", "func"], [(da.nanargmin, np.nanargmin), (da.nanargmax, np.nanargmax)]
)
def test_nanarg_reductions(dfunc, func):

x = np.random.random((10, 10, 10))
x[5] = np.nan
a = da.from_array(x, chunks=(3, 4, 5))
1 change: 0 additions & 1 deletion dask/array/tests/test_routines.py
@@ -2178,7 +2178,6 @@ def test_coarsen_bad_chunks(chunks):
],
)
def test_aligned_coarsen_chunks(chunks, divisor):

from dask.array.routines import aligned_coarsen_chunks as acc

aligned_chunks = acc(chunks, divisor)
2 changes: 0 additions & 2 deletions dask/array/tests/test_ufunc.py
@@ -252,7 +252,6 @@ def test_ufunc_outer():

@pytest.mark.parametrize("ufunc", ["isreal", "iscomplex", "real", "imag"])
def test_complex(ufunc):

dafunc = getattr(da, ufunc)
# Note that these functions are not NumPy ufuncs
npfunc = getattr(np, ufunc)
@@ -282,7 +281,6 @@ def test_ufunc_2results(ufunc):

@pytest.mark.parametrize("ufunc", ["frexp", "modf"])
def test_ufunc_2results(ufunc):

dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)

3 changes: 1 addition & 2 deletions dask/blockwise.py
@@ -734,7 +734,7 @@ def _cull_dependencies(self, all_hlg_keys, output_blocks):

# Gather constant dependencies (for all output keys)
const_deps = set()
for (arg, ind) in self.indices:
for arg, ind in self.indices:
if ind is None:
try:
if arg in all_hlg_keys:
@@ -1174,7 +1174,6 @@ def make_blockwise_graph(
key_deps[out_key] = deps

if return_key_deps:

# Add valid-key dependencies from io_deps
for key, io_dep in io_deps.items():
if io_dep.produces_keys:
1 change: 0 additions & 1 deletion dask/bytes/tests/test_local.py
@@ -47,7 +47,6 @@ def to_uri(path):


def test_unordered_urlpath_errors():

# Unordered urlpath argument
with pytest.raises(TypeError):
read_bytes(
1 change: 0 additions & 1 deletion dask/bytes/tests/test_s3.py
@@ -492,7 +492,6 @@ def test_parquet(s3, engine, s3so, metadata_file):
# Check that `open_file_options` arguments are
# really passed through to fsspec
if fsspec_parquet:

# Passing `open_file_options` kwargs will fail
# if you set an unsupported engine
with pytest.raises(ValueError):
1 change: 0 additions & 1 deletion dask/dataframe/accessor.py
@@ -291,7 +291,6 @@ def cat(self, others=None, sep=None, na_rep=None):
if others is None:

def str_cat_none(x):

if isinstance(x, (Series, Index)):
x = x.compute()

6 changes: 0 additions & 6 deletions dask/dataframe/core.py
@@ -2948,7 +2948,6 @@ def describe(
datetime_is_numeric_kwarg = {}

if self._meta.ndim == 1:

meta = self._meta_nonempty.describe(
percentiles=percentiles,
include=include,
@@ -3772,7 +3771,6 @@ def rename(self, index=None, inplace=False, sorted_index=False):
and not is_dict_like(index)
and not isinstance(index, dd.Series)
):

if inplace:
warnings.warn(
"'inplace' argument for dask series will be removed in future versions",
@@ -4695,7 +4693,6 @@ def empty(self):
def __getitem__(self, key):
name = "getitem-%s" % tokenize(self, key)
if np.isscalar(key) or isinstance(key, (tuple, str)):

if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):
if key not in self._meta.columns:
if PANDAS_GT_120:
@@ -5032,7 +5029,6 @@ def set_index(

# Check other can be translated to column name or column object, possibly flattening it
if not isinstance(other, str):

# It may refer to several columns
if isinstance(other, Sequence): # type: ignore[unreachable]
# Accept ["a"], but not [["a"]]
@@ -7070,7 +7066,6 @@ def quantile(df, q, method="default"):
if internal_method == "tdigest" and (
np.issubdtype(df.dtype, np.floating) or np.issubdtype(df.dtype, np.integer)
):

from dask.utils import import_required

import_required(
@@ -7090,7 +7085,6 @@ def quantile(df, q, method="default"):
(name2, 0): finalize_tsk((_percentiles_from_tdigest, qs, sorted(val_dsk)))
}
else:

from dask.array.dispatch import percentile_lookup as _percentile
from dask.array.percentile import merge_percentiles

3 changes: 1 addition & 2 deletions dask/dataframe/groupby.py
@@ -940,7 +940,7 @@ def _build_agg_args(spec):

# a partial may contain some arguments, pass them down
# https://github.com/dask/dask/issues/9615
for (result_column, func, input_column) in spec:
for result_column, func, input_column in spec:
func_args = ()
func_kwargs = {}
if isinstance(func, partial):
@@ -1389,7 +1389,6 @@ def __init__(
sort=True,
observed=False,
):

by_ = by if isinstance(by, (tuple, list)) else [by]
if any(isinstance(key, pd.Grouper) for key in by_):
raise NotImplementedError("pd.Grouper is currently not supported by Dask.")
2 changes: 0 additions & 2 deletions dask/dataframe/indexing.py
@@ -47,7 +47,6 @@ def _meta_indexer(self):
return self.obj._meta.iloc

def __getitem__(self, key):

# dataframe
msg = (
"'DataFrame.iloc' only supports selecting columns. "
@@ -87,7 +86,6 @@ def _meta_indexer(self):
return self.obj._meta.loc

def __getitem__(self, key):

if isinstance(key, tuple):
# multi-dimensional selection
if len(key) > self.obj.ndim:
1 change: 0 additions & 1 deletion dask/dataframe/io/csv.py
@@ -97,7 +97,6 @@ def project_columns(self, columns):
)

def __call__(self, part):

# Part will be a 3-element tuple
block, path, is_first, is_last = part

2 changes: 0 additions & 2 deletions dask/dataframe/io/hdf.py
@@ -470,13 +470,11 @@ def _build_parts(paths, key, start, stop, chunksize, sorted_index, mode):
parts = []
global_divisions = []
for path in paths:

keys, stops, divisions = _get_keys_stops_divisions(
path, key, stop, sorted_index, chunksize, mode
)

for k, s, d in zip(keys, stops, divisions):

if d and global_divisions:
global_divisions = global_divisions[:-1] + d
elif d:
1 change: 0 additions & 1 deletion dask/dataframe/io/orc/arrow.py
@@ -16,7 +16,6 @@ def read_metadata(
aggregate_files,
**kwargs,
):

# Convert root directory to file list.
# TODO: Handle hive-partitioned data
if len(paths) == 1 and not fs.isfile(paths[0]):