From 9088a36ee445bde38a6631e92c2118e9913eda75 Mon Sep 17 00:00:00 2001
From: Anh Trinh
Date: Thu, 7 Mar 2024 18:20:13 +0100
Subject: [PATCH 1/4] Bump ruff to latest version

---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 201820c6a8b28..190ea32203807 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -19,7 +19,7 @@ ci:
   skip: [pylint, pyright, mypy]
 repos:
 - repo: https://github.com/astral-sh/ruff-pre-commit
-  rev: v0.1.13
+  rev: v0.3.1
   hooks:
   - id: ruff
     args: [--exit-non-zero-on-fix]

From e44287bad66a24fb6f0f23f9accaa8b7ccd9dfec Mon Sep 17 00:00:00 2001
From: Anh Trinh
Date: Thu, 7 Mar 2024 18:21:25 +0100
Subject: [PATCH 2/4] Autoformat

---
 asv_bench/benchmarks/indexing.py | 1 +
 asv_bench/benchmarks/libs.py | 1 +
 asv_bench/benchmarks/package.py | 1 +
 asv_bench/benchmarks/period.py | 1 +
 asv_bench/benchmarks/tslibs/offsets.py | 1 +
 asv_bench/benchmarks/tslibs/resolution.py | 1 +
 asv_bench/benchmarks/tslibs/timedelta.py | 1 +
 asv_bench/benchmarks/tslibs/tslib.py | 1 +
 doc/make.py | 1 +
 pandas/_config/__init__.py | 1 +
 pandas/_config/config.py | 6 +-
 pandas/_config/dates.py | 1 +
 pandas/_config/localization.py | 1 +
 pandas/_testing/_hypothesis.py | 1 +
 pandas/_testing/compat.py | 1 +
 pandas/_typing.py | 30 +-
 pandas/_version.py | 6 +-
 pandas/api/__init__.py | 3 +-
 pandas/arrays/__init__.py | 1 +
 pandas/compat/__init__.py | 1 +
 pandas/compat/_optional.py | 6 +-
 pandas/compat/numpy/__init__.py | 3 +-
 pandas/compat/numpy/function.py | 7 +-
 pandas/compat/pickle_compat.py | 1 +
 pandas/compat/pyarrow.py | 2 +-
 pandas/conftest.py | 1 +
 pandas/core/_numba/kernels/mean_.py | 1 +
 pandas/core/_numba/kernels/min_max_.py | 1 +
 pandas/core/_numba/kernels/sum_.py | 1 +
 pandas/core/_numba/kernels/var_.py | 1 +
 pandas/core/accessor.py | 1 +
 pandas/core/algorithms.py | 1 +
 pandas/core/apply.py | 7 +-
 pandas/core/array_algos/masked_reductions.py | 1 +
 pandas/core/array_algos/putmask.py | 1 +
 pandas/core/array_algos/replace.py | 1 +
 pandas/core/array_algos/take.py | 6 +-
 pandas/core/arraylike.py | 1 +
 pandas/core/arrays/_mixins.py | 6 +-
 pandas/core/arrays/_ranges.py | 1 +
 pandas/core/arrays/arrow/array.py | 12 +-
 pandas/core/arrays/base.py | 28 +-
 pandas/core/arrays/categorical.py | 21 +-
 pandas/core/arrays/datetimelike.py | 24 +-
 pandas/core/arrays/datetimes.py | 6 +-
 pandas/core/arrays/interval.py | 6 +-
 pandas/core/arrays/masked.py | 27 +-
 pandas/core/arrays/period.py | 6 +-
 pandas/core/arrays/sparse/accessor.py | 1 +
 pandas/core/arrays/sparse/array.py | 13 +-
 pandas/core/arrays/sparse/scipy_sparse.py | 1 +
 pandas/core/base.py | 6 +-
 pandas/core/common.py | 16 +-
 pandas/core/computation/align.py | 1 +
 pandas/core/computation/engines.py | 1 +
 pandas/core/computation/eval.py | 1 +
 pandas/core/computation/expr.py | 1 +
 pandas/core/computation/expressions.py | 1 +
 pandas/core/computation/parsing.py | 1 +
 pandas/core/computation/pytables.py | 3 +-
 pandas/core/computation/scope.py | 1 +
 pandas/core/config_init.py | 1 +
 pandas/core/construction.py | 7 +-
 pandas/core/dtypes/astype.py | 7 +-
 pandas/core/dtypes/base.py | 16 +-
 pandas/core/dtypes/cast.py | 31 +-
 pandas/core/dtypes/common.py | 1 +
 pandas/core/dtypes/concat.py | 1 +
 pandas/core/dtypes/dtypes.py | 1 +
 pandas/core/dtypes/generic.py | 3 +-
 pandas/core/dtypes/inference.py | 2 +-
 pandas/core/dtypes/missing.py | 35 +--
 pandas/core/frame.py | 269 ++++++------------
 pandas/core/generic.py | 168 ++++-------
 pandas/core/groupby/base.py | 1 +
pandas/core/groupby/generic.py | 1 + pandas/core/groupby/groupby.py | 7 +- pandas/core/groupby/grouper.py | 1 + pandas/core/groupby/numba_.py | 1 + pandas/core/groupby/ops.py | 1 + pandas/core/indexers/objects.py | 1 + pandas/core/indexers/utils.py | 1 + pandas/core/indexes/accessors.py | 1 + pandas/core/indexes/base.py | 44 ++- pandas/core/indexes/datetimelike.py | 4 +- pandas/core/indexes/extension.py | 1 + pandas/core/indexes/interval.py | 3 +- pandas/core/indexes/range.py | 9 +- pandas/core/indexes/timedeltas.py | 3 +- pandas/core/interchange/from_dataframe.py | 9 +- pandas/core/internals/api.py | 1 + pandas/core/internals/construction.py | 1 + pandas/core/methods/describe.py | 1 + pandas/core/methods/to_dict.py | 12 +- pandas/core/missing.py | 7 +- pandas/core/ops/__init__.py | 1 + pandas/core/ops/array_ops.py | 1 + pandas/core/ops/common.py | 1 + pandas/core/ops/dispatch.py | 1 + pandas/core/ops/docstrings.py | 13 +- pandas/core/ops/invalid.py | 1 + pandas/core/ops/mask_ops.py | 1 + pandas/core/ops/missing.py | 1 + pandas/core/resample.py | 12 +- pandas/core/reshape/concat.py | 16 +- pandas/core/reshape/merge.py | 1 + pandas/core/reshape/reshape.py | 6 +- pandas/core/reshape/tile.py | 1 + pandas/core/roperator.py | 1 + pandas/core/sample.py | 1 + pandas/core/series.py | 109 +++---- pandas/core/sorting.py | 3 +- pandas/core/tools/datetimes.py | 9 +- pandas/core/tools/timedeltas.py | 10 +- pandas/core/util/hashing.py | 1 + pandas/core/util/numba_.py | 1 + pandas/core/window/common.py | 1 + pandas/core/window/doc.py | 1 + pandas/core/window/rolling.py | 1 + pandas/errors/__init__.py | 1 + pandas/io/clipboards.py | 3 +- pandas/io/common.py | 27 +- pandas/io/excel/_base.py | 8 +- pandas/io/excel/_odswriter.py | 6 +- pandas/io/excel/_util.py | 18 +- pandas/io/feather_format.py | 3 +- pandas/io/formats/console.py | 1 + pandas/io/formats/css.py | 1 + pandas/io/formats/excel.py | 1 + pandas/io/formats/format.py | 1 + pandas/io/formats/html.py | 1 + pandas/io/formats/printing.py | 1 + pandas/io/formats/string.py | 1 + pandas/io/formats/style.py | 25 +- pandas/io/formats/style_render.py | 18 +- pandas/io/formats/xml.py | 1 + pandas/io/json/_json.py | 38 +-- pandas/io/json/_normalize.py | 6 +- pandas/io/json/_table_schema.py | 1 + pandas/io/orc.py | 3 +- pandas/io/parquet.py | 3 +- pandas/io/parsers/arrow_parser_wrapper.py | 6 +- pandas/io/parsers/base_parser.py | 12 +- pandas/io/parsers/readers.py | 49 ++-- pandas/io/pickle.py | 3 +- pandas/io/pytables.py | 1 + pandas/io/sas/sas7bdat.py | 1 + pandas/io/sas/sas_constants.py | 60 ++-- pandas/io/sas/sas_xport.py | 1 + pandas/io/sas/sasreader.py | 13 +- pandas/io/sql.py | 18 +- pandas/io/stata.py | 1 + pandas/plotting/__init__.py | 1 + pandas/plotting/_matplotlib/style.py | 9 +- pandas/testing.py | 1 - pandas/tests/arithmetic/common.py | 1 + pandas/tests/arrays/interval/test_overlaps.py | 1 + .../tests/arrays/masked/test_arrow_compat.py | 2 +- pandas/tests/arrays/masked_shared.py | 1 + pandas/tests/arrays/numpy_/test_numpy.py | 1 + pandas/tests/arrays/string_/test_string.py | 1 + pandas/tests/arrays/test_datetimes.py | 1 + pandas/tests/arrays/test_ndarray_backed.py | 1 + pandas/tests/dtypes/test_inference.py | 4 +- .../tests/extension/array_with_attr/array.py | 1 + pandas/tests/extension/base/__init__.py | 1 + pandas/tests/extension/base/dim2.py | 1 + pandas/tests/extension/base/index.py | 1 + pandas/tests/extension/json/array.py | 1 + pandas/tests/extension/list/array.py | 1 + pandas/tests/extension/test_arrow.py | 1 + 
pandas/tests/extension/test_categorical.py | 1 + pandas/tests/extension/test_datetime.py | 1 + pandas/tests/extension/test_extension.py | 1 + pandas/tests/extension/test_interval.py | 1 + pandas/tests/extension/test_masked.py | 1 + pandas/tests/extension/test_numpy.py | 1 + pandas/tests/extension/test_period.py | 1 + pandas/tests/extension/test_string.py | 1 + pandas/tests/frame/indexing/test_coercion.py | 1 + pandas/tests/frame/indexing/test_insert.py | 1 + .../frame/methods/test_first_valid_index.py | 1 + pandas/tests/frame/methods/test_nlargest.py | 1 + pandas/tests/frame/test_npfuncs.py | 1 + pandas/tests/generic/test_duplicate_labels.py | 1 + pandas/tests/generic/test_finalize.py | 1 + .../tests/groupby/aggregate/test_aggregate.py | 1 + .../groupby/methods/test_value_counts.py | 1 - pandas/tests/groupby/test_grouping.py | 1 + pandas/tests/groupby/test_timegrouper.py | 1 + .../tests/groupby/transform/test_transform.py | 3 +- .../tests/indexes/base_class/test_reshape.py | 1 + .../tests/indexes/categorical/test_formats.py | 1 + .../indexes/datetimelike_/test_equals.py | 1 + .../indexes/datetimes/test_partial_slicing.py | 2 +- .../tests/indexes/datetimes/test_timezones.py | 1 + pandas/tests/indexes/test_any_index.py | 1 + pandas/tests/indexes/test_common.py | 1 + pandas/tests/indexes/test_datetimelike.py | 2 +- pandas/tests/indexes/test_index_new.py | 1 + pandas/tests/indexes/test_indexing.py | 1 + pandas/tests/indexes/test_setops.py | 1 + pandas/tests/indexes/test_subclass.py | 1 + pandas/tests/indexing/common.py | 3 +- pandas/tests/indexing/test_iloc.py | 2 +- pandas/tests/indexing/test_indexing.py | 2 +- pandas/tests/indexing/test_loc.py | 3 +- pandas/tests/indexing/test_scalar.py | 3 +- .../interchange/test_spec_conformance.py | 1 + pandas/tests/io/excel/test_writers.py | 18 +- pandas/tests/io/formats/test_format.py | 1 + pandas/tests/io/formats/test_to_excel.py | 1 + .../tests/io/json/test_deprecated_kwargs.py | 1 + .../tests/io/json/test_json_table_schema.py | 1 + .../tests/io/parser/common/test_chunksize.py | 1 + .../io/parser/common/test_common_basic.py | 1 + .../tests/io/parser/common/test_data_list.py | 1 + pandas/tests/io/parser/common/test_decimal.py | 1 + .../io/parser/common/test_file_buffer_url.py | 1 + pandas/tests/io/parser/common/test_float.py | 1 + pandas/tests/io/parser/common/test_index.py | 1 + pandas/tests/io/parser/common/test_inf.py | 1 + pandas/tests/io/parser/common/test_ints.py | 1 + .../tests/io/parser/common/test_iterator.py | 1 + .../io/parser/common/test_read_errors.py | 10 +- pandas/tests/io/parser/common/test_verbose.py | 1 + .../io/parser/dtypes/test_categorical.py | 1 + .../io/parser/dtypes/test_dtypes_basic.py | 1 + pandas/tests/io/parser/dtypes/test_empty.py | 1 + pandas/tests/io/parser/test_c_parser_only.py | 3 +- pandas/tests/io/parser/test_comment.py | 1 + pandas/tests/io/parser/test_converters.py | 1 + pandas/tests/io/parser/test_encoding.py | 1 + pandas/tests/io/parser/test_index_col.py | 1 + pandas/tests/io/parser/test_mangle_dupes.py | 1 + pandas/tests/io/parser/test_multi_thread.py | 1 + pandas/tests/io/parser/test_na_values.py | 1 + pandas/tests/io/parser/test_network.py | 1 + .../io/parser/test_python_parser_only.py | 1 + pandas/tests/io/parser/test_textreader.py | 1 + pandas/tests/io/parser/test_unsupported.py | 1 + .../io/parser/usecols/test_parse_dates.py | 1 + .../tests/io/parser/usecols/test_strings.py | 1 + .../io/parser/usecols/test_usecols_basic.py | 1 + pandas/tests/io/pytables/test_append.py | 5 +- pandas/tests/io/test_common.py | 
1 + pandas/tests/io/test_feather.py | 3 +- pandas/tests/io/test_gcs.py | 14 +- pandas/tests/io/test_html.py | 3 +- pandas/tests/io/test_http_headers.py | 1 + pandas/tests/io/test_orc.py | 3 +- pandas/tests/io/test_parquet.py | 3 +- pandas/tests/io/test_pickle.py | 1 + pandas/tests/io/test_stata.py | 4 +- pandas/tests/plotting/frame/test_frame.py | 5 +- .../tests/plotting/frame/test_frame_color.py | 3 +- .../plotting/frame/test_frame_groupby.py | 2 +- .../plotting/frame/test_frame_subplots.py | 2 +- pandas/tests/plotting/test_boxplot_method.py | 2 +- pandas/tests/plotting/test_datetimelike.py | 3 +- pandas/tests/plotting/test_groupby.py | 3 +- pandas/tests/plotting/test_hist_method.py | 3 +- pandas/tests/plotting/test_misc.py | 3 +- pandas/tests/plotting/test_series.py | 3 +- .../tests/reductions/test_stat_reductions.py | 1 + .../tests/scalar/timedelta/test_arithmetic.py | 1 + .../tests/scalar/timedelta/test_timedelta.py | 3 +- .../tests/scalar/timestamp/test_timestamp.py | 2 +- .../tests/scalar/timestamp/test_timezones.py | 1 + pandas/tests/series/indexing/test_datetime.py | 1 + pandas/tests/series/indexing/test_getitem.py | 1 + pandas/tests/series/indexing/test_indexing.py | 3 +- pandas/tests/series/methods/test_isna.py | 1 + pandas/tests/series/methods/test_item.py | 1 + pandas/tests/series/methods/test_nlargest.py | 1 + pandas/tests/series/methods/test_set_name.py | 2 +- pandas/tests/series/test_constructors.py | 2 +- pandas/tests/series/test_formats.py | 6 +- pandas/tests/test_downstream.py | 1 + pandas/tests/tools/test_to_datetime.py | 2 +- pandas/tests/tseries/offsets/common.py | 1 + .../tseries/offsets/test_business_day.py | 1 + .../tseries/offsets/test_business_hour.py | 1 + .../tseries/offsets/test_business_month.py | 1 + .../tseries/offsets/test_business_quarter.py | 1 + .../tseries/offsets/test_business_year.py | 1 + .../offsets/test_custom_business_day.py | 1 + .../offsets/test_custom_business_hour.py | 1 + .../offsets/test_custom_business_month.py | 1 + pandas/tests/tseries/offsets/test_dst.py | 1 + pandas/tests/tseries/offsets/test_easter.py | 1 + pandas/tests/tseries/offsets/test_fiscal.py | 1 + pandas/tests/tseries/offsets/test_index.py | 1 + pandas/tests/tseries/offsets/test_month.py | 1 + pandas/tests/tseries/offsets/test_offsets.py | 1 + .../offsets/test_offsets_properties.py | 1 + pandas/tests/tseries/offsets/test_quarter.py | 1 + pandas/tests/tseries/offsets/test_ticks.py | 1 + pandas/tests/tseries/offsets/test_week.py | 1 + pandas/tests/tseries/offsets/test_year.py | 1 + pandas/tests/tslibs/test_liboffsets.py | 1 + pandas/tests/tslibs/test_parsing.py | 1 + .../util/test_assert_produces_warning.py | 3 +- pandas/util/_test_decorators.py | 1 + pandas/util/_tester.py | 1 + pandas/util/_validators.py | 7 +- web/pandas_web.py | 1 + 307 files changed, 810 insertions(+), 932 deletions(-) diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index 86da26bead64d..15e691d46f693 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -3,6 +3,7 @@ lower-level methods directly on Index and subclasses, see index_object.py, indexing_engine.py, and index_cached.py """ + from datetime import datetime import warnings diff --git a/asv_bench/benchmarks/libs.py b/asv_bench/benchmarks/libs.py index 3419163bcfe09..7da2d27d98dbb 100644 --- a/asv_bench/benchmarks/libs.py +++ b/asv_bench/benchmarks/libs.py @@ -5,6 +5,7 @@ If a PR does not edit anything in _libs/, then it is unlikely that the benchmarks will be affected. 
""" + import numpy as np from pandas._libs.lib import ( diff --git a/asv_bench/benchmarks/package.py b/asv_bench/benchmarks/package.py index 257c82cba8878..f8b51a523dab8 100644 --- a/asv_bench/benchmarks/package.py +++ b/asv_bench/benchmarks/package.py @@ -1,6 +1,7 @@ """ Benchmarks for pandas at the package-level. """ + import subprocess import sys diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py index ccd86cae06d58..3b8b60e790380 100644 --- a/asv_bench/benchmarks/period.py +++ b/asv_bench/benchmarks/period.py @@ -2,6 +2,7 @@ Period benchmarks with non-tslibs dependencies. See benchmarks.tslibs.period for benchmarks that rely only on tslibs. """ + from pandas import ( DataFrame, Period, diff --git a/asv_bench/benchmarks/tslibs/offsets.py b/asv_bench/benchmarks/tslibs/offsets.py index 1f48ec504acf1..55bd3c31c055c 100644 --- a/asv_bench/benchmarks/tslibs/offsets.py +++ b/asv_bench/benchmarks/tslibs/offsets.py @@ -2,6 +2,7 @@ offsets benchmarks that rely only on tslibs. See benchmarks.offset for offsets benchmarks that rely on other parts of pandas. """ + from datetime import datetime import numpy as np diff --git a/asv_bench/benchmarks/tslibs/resolution.py b/asv_bench/benchmarks/tslibs/resolution.py index 44f288c7de216..6317d299379d3 100644 --- a/asv_bench/benchmarks/tslibs/resolution.py +++ b/asv_bench/benchmarks/tslibs/resolution.py @@ -17,6 +17,7 @@ df.loc[key] = (val.average, val.stdev) """ + import numpy as np try: diff --git a/asv_bench/benchmarks/tslibs/timedelta.py b/asv_bench/benchmarks/tslibs/timedelta.py index 2daf1861eb80a..dcc73aefc6c7a 100644 --- a/asv_bench/benchmarks/tslibs/timedelta.py +++ b/asv_bench/benchmarks/tslibs/timedelta.py @@ -2,6 +2,7 @@ Timedelta benchmarks that rely only on tslibs. See benchmarks.timedeltas for Timedelta benchmarks that rely on other parts of pandas. """ + import datetime import numpy as np diff --git a/asv_bench/benchmarks/tslibs/tslib.py b/asv_bench/benchmarks/tslibs/tslib.py index 97ec80201dd16..4a011d4bb3f06 100644 --- a/asv_bench/benchmarks/tslibs/tslib.py +++ b/asv_bench/benchmarks/tslibs/tslib.py @@ -15,6 +15,7 @@ val = %timeit -o tr.time_ints_to_pydatetime(box, size, tz) df.loc[key] = (val.average, val.stdev) """ + from datetime import ( timedelta, timezone, diff --git a/doc/make.py b/doc/make.py index c9588ffb80517..02deb5002fea1 100755 --- a/doc/make.py +++ b/doc/make.py @@ -11,6 +11,7 @@ $ python make.py html $ python make.py latex """ + import argparse import csv import importlib diff --git a/pandas/_config/__init__.py b/pandas/_config/__init__.py index c43d59654b44c..e1999dd536999 100644 --- a/pandas/_config/__init__.py +++ b/pandas/_config/__init__.py @@ -5,6 +5,7 @@ importing `dates` and `display` ensures that keys needed by _libs are initialized. """ + __all__ = [ "config", "detect_console_encoding", diff --git a/pandas/_config/config.py b/pandas/_config/config.py index 9decc7eecf033..ebf2ba2510aa4 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -688,15 +688,13 @@ def _build_option_description(k: str) -> str: @overload def pp_options_list( keys: Iterable[str], *, width: int = ..., _print: Literal[False] = ... -) -> str: - ... +) -> str: ... @overload def pp_options_list( keys: Iterable[str], *, width: int = ..., _print: Literal[True] -) -> None: - ... +) -> None: ... 
def pp_options_list( diff --git a/pandas/_config/dates.py b/pandas/_config/dates.py index b37831f96eb73..2d9f5d390dc9c 100644 --- a/pandas/_config/dates.py +++ b/pandas/_config/dates.py @@ -1,6 +1,7 @@ """ config for datetime formatting """ + from __future__ import annotations from pandas._config import config as cf diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py index 69a56d3911316..61d88c43f0e4a 100644 --- a/pandas/_config/localization.py +++ b/pandas/_config/localization.py @@ -3,6 +3,7 @@ Name `localization` is chosen to avoid overlap with builtin `locale` module. """ + from __future__ import annotations from contextlib import contextmanager diff --git a/pandas/_testing/_hypothesis.py b/pandas/_testing/_hypothesis.py index 4e584781122a3..b7fc175b10d17 100644 --- a/pandas/_testing/_hypothesis.py +++ b/pandas/_testing/_hypothesis.py @@ -1,6 +1,7 @@ """ Hypothesis data generator helpers. """ + from datetime import datetime from hypothesis import strategies as st diff --git a/pandas/_testing/compat.py b/pandas/_testing/compat.py index cc352ba7b8f2f..722ba61a3227f 100644 --- a/pandas/_testing/compat.py +++ b/pandas/_testing/compat.py @@ -1,6 +1,7 @@ """ Helpers for sharing tests between DataFrame/Series """ + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/_typing.py b/pandas/_typing.py index d7325fed93d62..f868a92554b39 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -140,30 +140,22 @@ class SequenceNotStr(Protocol[_T_co]): @overload - def __getitem__(self, index: SupportsIndex, /) -> _T_co: - ... + def __getitem__(self, index: SupportsIndex, /) -> _T_co: ... @overload - def __getitem__(self, index: slice, /) -> Sequence[_T_co]: - ... + def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ... - def __contains__(self, value: object, /) -> bool: - ... + def __contains__(self, value: object, /) -> bool: ... - def __len__(self) -> int: - ... + def __len__(self) -> int: ... - def __iter__(self) -> Iterator[_T_co]: - ... + def __iter__(self) -> Iterator[_T_co]: ... - def index(self, value: Any, start: int = ..., stop: int = ..., /) -> int: - ... + def index(self, value: Any, start: int = ..., stop: int = ..., /) -> int: ... - def count(self, value: Any, /) -> int: - ... + def count(self, value: Any, /) -> int: ... - def __reversed__(self) -> Iterator[_T_co]: - ... + def __reversed__(self) -> Iterator[_T_co]: ... ListLike = Union[AnyArrayLike, SequenceNotStr, range] @@ -317,13 +309,11 @@ def flush(self) -> Any: class ReadPickleBuffer(ReadBuffer[bytes], Protocol): - def readline(self) -> bytes: - ... + def readline(self) -> bytes: ... class WriteExcelBuffer(WriteBuffer[bytes], Protocol): - def truncate(self, size: int | None = ...) -> int: - ... + def truncate(self, size: int | None = ...) -> int: ... 
class ReadCsvBuffer(ReadBuffer[AnyStr_co], Protocol): diff --git a/pandas/_version.py b/pandas/_version.py index 08a7111324e3b..7bd9da2bb1cfa 100644 --- a/pandas/_version.py +++ b/pandas/_version.py @@ -358,9 +358,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces[ - "error" - ] = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" + pieces["error"] = ( + f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" + ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] diff --git a/pandas/api/__init__.py b/pandas/api/__init__.py index a0d42b6541fdf..9b007e8fe8da4 100644 --- a/pandas/api/__init__.py +++ b/pandas/api/__init__.py @@ -1,4 +1,5 @@ -""" public toolkit API """ +"""public toolkit API""" + from pandas.api import ( extensions, indexers, diff --git a/pandas/arrays/__init__.py b/pandas/arrays/__init__.py index a11755275d00e..bcf295fd6b490 100644 --- a/pandas/arrays/__init__.py +++ b/pandas/arrays/__init__.py @@ -3,6 +3,7 @@ See :ref:`extending.extension-types` for more. """ + from pandas.core.arrays import ( ArrowExtensionArray, ArrowStringArray, diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 738442fab8c70..1c08df80df477 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -7,6 +7,7 @@ Other items: * platform checker """ + from __future__ import annotations import os diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index 26beca6a0e4b6..f9273ba4bbc62 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -91,8 +91,7 @@ def import_optional_dependency( min_version: str | None = ..., *, errors: Literal["raise"] = ..., -) -> types.ModuleType: - ... +) -> types.ModuleType: ... @overload @@ -102,8 +101,7 @@ def import_optional_dependency( min_version: str | None = ..., *, errors: Literal["warn", "ignore"], -) -> types.ModuleType | None: - ... +) -> types.ModuleType | None: ... def import_optional_dependency( diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py index 7fc4b8d1d9b10..54a12c76a230b 100644 --- a/pandas/compat/numpy/__init__.py +++ b/pandas/compat/numpy/__init__.py @@ -1,4 +1,5 @@ -""" support numpy compatibility across versions """ +"""support numpy compatibility across versions""" + import warnings import numpy as np diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index 4df30f7f4a8a7..9432635f62a35 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -15,6 +15,7 @@ methods that are spread throughout the codebase. This module will make it easier to adjust to future upstream changes in the analogous numpy signatures. """ + from __future__ import annotations from typing import ( @@ -179,13 +180,11 @@ def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) @overload -def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None: - ... +def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None: ... @overload -def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT: - ... +def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT: ... 
def validate_clip_with_axis( diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index f4698bee5cb02..26c44c2613cb2 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -1,6 +1,7 @@ """ Pickle compatibility to pandas version 1.0 """ + from __future__ import annotations import contextlib diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py index beb4814914101..5c9e885f8e9f5 100644 --- a/pandas/compat/pyarrow.py +++ b/pandas/compat/pyarrow.py @@ -1,4 +1,4 @@ -""" support pyarrow compatibility across versions """ +"""support pyarrow compatibility across versions""" from __future__ import annotations diff --git a/pandas/conftest.py b/pandas/conftest.py index 5cdb3b59698f5..c9f7ea2096008 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -17,6 +17,7 @@ - Dtypes - Misc """ + from __future__ import annotations from collections import abc diff --git a/pandas/core/_numba/kernels/mean_.py b/pandas/core/_numba/kernels/mean_.py index 4ed9e8cb2bf50..cc10bd003af7e 100644 --- a/pandas/core/_numba/kernels/mean_.py +++ b/pandas/core/_numba/kernels/mean_.py @@ -6,6 +6,7 @@ Mirrors pandas/_libs/window/aggregation.pyx """ + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/core/_numba/kernels/min_max_.py b/pandas/core/_numba/kernels/min_max_.py index c9803980e64a6..59d36732ebae6 100644 --- a/pandas/core/_numba/kernels/min_max_.py +++ b/pandas/core/_numba/kernels/min_max_.py @@ -6,6 +6,7 @@ Mirrors pandas/_libs/window/aggregation.pyx """ + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/core/_numba/kernels/sum_.py b/pandas/core/_numba/kernels/sum_.py index 94db84267ceec..76f4e22b43c4b 100644 --- a/pandas/core/_numba/kernels/sum_.py +++ b/pandas/core/_numba/kernels/sum_.py @@ -6,6 +6,7 @@ Mirrors pandas/_libs/window/aggregation.pyx """ + from __future__ import annotations from typing import ( diff --git a/pandas/core/_numba/kernels/var_.py b/pandas/core/_numba/kernels/var_.py index c63d0b90b0fc3..69aec4d6522c4 100644 --- a/pandas/core/_numba/kernels/var_.py +++ b/pandas/core/_numba/kernels/var_.py @@ -6,6 +6,7 @@ Mirrors pandas/_libs/window/aggregation.pyx """ + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index 39a5ffd947009..99b5053ce250c 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -4,6 +4,7 @@ that can be mixed into or pinned onto other pandas classes. """ + from __future__ import annotations from typing import ( diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 3672cdb13d4a3..774bbbe2463e9 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -2,6 +2,7 @@ Generic data algorithms. 
This module is experimental at the moment and not intended for public consumption """ + from __future__ import annotations import decimal diff --git a/pandas/core/apply.py b/pandas/core/apply.py index d9d95c96ba0fe..f2fb503be86f5 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -1300,9 +1300,10 @@ def apply_with_numba(self) -> dict[int, Any]: # Convert from numba dict to regular dict # Our isinstance checks in the df constructor don't pass for numbas typed dict - with set_numba_data(self.obj.index) as index, set_numba_data( - self.columns - ) as columns: + with ( + set_numba_data(self.obj.index) as index, + set_numba_data(self.columns) as columns, + ): res = dict(nb_func(self.values, columns, index)) return res diff --git a/pandas/core/array_algos/masked_reductions.py b/pandas/core/array_algos/masked_reductions.py index 335fa1afc0f4e..3784689995802 100644 --- a/pandas/core/array_algos/masked_reductions.py +++ b/pandas/core/array_algos/masked_reductions.py @@ -2,6 +2,7 @@ masked_reductions.py is for reduction algorithms using a mask-based approach for missing values. """ + from __future__ import annotations from typing import ( diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py index f65d2d20e028e..464a4d552af68 100644 --- a/pandas/core/array_algos/putmask.py +++ b/pandas/core/array_algos/putmask.py @@ -1,6 +1,7 @@ """ EA-compatible analogue to np.putmask """ + from __future__ import annotations from typing import ( diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py index 7293a46eb9a60..6cc867c60fd82 100644 --- a/pandas/core/array_algos/replace.py +++ b/pandas/core/array_algos/replace.py @@ -1,6 +1,7 @@ """ Methods used by Block.replace and related methods. """ + from __future__ import annotations import operator diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py index ac674e31586e7..ca2c7a3b9664f 100644 --- a/pandas/core/array_algos/take.py +++ b/pandas/core/array_algos/take.py @@ -41,8 +41,7 @@ def take_nd( axis: AxisInt = ..., fill_value=..., allow_fill: bool = ..., -) -> np.ndarray: - ... +) -> np.ndarray: ... @overload @@ -52,8 +51,7 @@ def take_nd( axis: AxisInt = ..., fill_value=..., allow_fill: bool = ..., -) -> ArrayLike: - ... +) -> ArrayLike: ... def take_nd( diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py index dde1b8a35e2f0..1fa610f35f56b 100644 --- a/pandas/core/arraylike.py +++ b/pandas/core/arraylike.py @@ -4,6 +4,7 @@ Index ExtensionArray """ + from __future__ import annotations import operator diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 83d2b6f1ca84f..c1d0ade572e8a 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -267,15 +267,13 @@ def _validate_setitem_value(self, value): return value @overload - def __getitem__(self, key: ScalarIndexer) -> Any: - ... + def __getitem__(self, key: ScalarIndexer) -> Any: ... @overload def __getitem__( self, key: SequenceIndexer | PositionalIndexerTuple, - ) -> Self: - ... + ) -> Self: ... 
def __getitem__( self, diff --git a/pandas/core/arrays/_ranges.py b/pandas/core/arrays/_ranges.py index 3e89391324ad4..600ddc7f717a8 100644 --- a/pandas/core/arrays/_ranges.py +++ b/pandas/core/arrays/_ranges.py @@ -2,6 +2,7 @@ Helper functions to generate range-like data for DatetimeArray (and possibly TimedeltaArray/PeriodArray) """ + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index cddccd7b45a3e..aaf43662ebde2 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -852,12 +852,10 @@ def isna(self) -> npt.NDArray[np.bool_]: return self._pa_array.is_null().to_numpy() @overload - def any(self, *, skipna: Literal[True] = ..., **kwargs) -> bool: - ... + def any(self, *, skipna: Literal[True] = ..., **kwargs) -> bool: ... @overload - def any(self, *, skipna: bool, **kwargs) -> bool | NAType: - ... + def any(self, *, skipna: bool, **kwargs) -> bool | NAType: ... def any(self, *, skipna: bool = True, **kwargs) -> bool | NAType: """ @@ -918,12 +916,10 @@ def any(self, *, skipna: bool = True, **kwargs) -> bool | NAType: return self._reduce("any", skipna=skipna, **kwargs) @overload - def all(self, *, skipna: Literal[True] = ..., **kwargs) -> bool: - ... + def all(self, *, skipna: Literal[True] = ..., **kwargs) -> bool: ... @overload - def all(self, *, skipna: bool, **kwargs) -> bool | NAType: - ... + def all(self, *, skipna: bool, **kwargs) -> bool | NAType: ... def all(self, *, skipna: bool = True, **kwargs) -> bool | NAType: """ diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index a0da3518f8e5e..399be217af9d1 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -6,6 +6,7 @@ This is an experimental API and subject to breaking changes without warning. """ + from __future__ import annotations import operator @@ -397,12 +398,10 @@ def _from_factorized(cls, values, original): # Must be a Sequence # ------------------------------------------------------------------------ @overload - def __getitem__(self, item: ScalarIndexer) -> Any: - ... + def __getitem__(self, item: ScalarIndexer) -> Any: ... @overload - def __getitem__(self, item: SequenceIndexer) -> Self: - ... + def __getitem__(self, item: SequenceIndexer) -> Self: ... def __getitem__(self, item: PositionalIndexer) -> Self | Any: """ @@ -648,16 +647,13 @@ def nbytes(self) -> int: # ------------------------------------------------------------------------ @overload - def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: - ... + def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: ... @overload - def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: - ... + def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: ... @overload - def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: - ... + def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: ... def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: """ @@ -2411,23 +2407,19 @@ def _groupby_op( class ExtensionArraySupportsAnyAll(ExtensionArray): @overload - def any(self, *, skipna: Literal[True] = ...) -> bool: - ... + def any(self, *, skipna: Literal[True] = ...) -> bool: ... @overload - def any(self, *, skipna: bool) -> bool | NAType: - ... + def any(self, *, skipna: bool) -> bool | NAType: ... 
def any(self, *, skipna: bool = True) -> bool | NAType: raise AbstractMethodError(self) @overload - def all(self, *, skipna: Literal[True] = ...) -> bool: - ... + def all(self, *, skipna: Literal[True] = ...) -> bool: ... @overload - def all(self, *, skipna: bool) -> bool | NAType: - ... + def all(self, *, skipna: bool) -> bool | NAType: ... def all(self, *, skipna: bool = True) -> bool | NAType: raise AbstractMethodError(self) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index f37513b2bc8fd..af8dc08c1ec26 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -554,16 +554,13 @@ def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self: return res @overload - def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: - ... + def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: ... @overload - def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: - ... + def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: ... @overload - def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: - ... + def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: ... def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: """ @@ -1975,14 +1972,12 @@ def sort_values( inplace: Literal[False] = ..., ascending: bool = ..., na_position: str = ..., - ) -> Self: - ... + ) -> Self: ... @overload def sort_values( self, *, inplace: Literal[True], ascending: bool = ..., na_position: str = ... - ) -> None: - ... + ) -> None: ... def sort_values( self, @@ -2667,12 +2662,10 @@ def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: return algorithms.isin(self.codes, code_values) @overload - def _replace(self, *, to_replace, value, inplace: Literal[False] = ...) -> Self: - ... + def _replace(self, *, to_replace, value, inplace: Literal[False] = ...) -> Self: ... @overload - def _replace(self, *, to_replace, value, inplace: Literal[True]) -> None: - ... + def _replace(self, *, to_replace, value, inplace: Literal[True]) -> None: ... def _replace(self, *, to_replace, value, inplace: bool = False) -> Self | None: from pandas import Index diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 14967bb81125d..dd7274c3d79f7 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -362,15 +362,13 @@ def __array__( return self._ndarray @overload - def __getitem__(self, key: ScalarIndexer) -> DTScalarOrNaT: - ... + def __getitem__(self, key: ScalarIndexer) -> DTScalarOrNaT: ... @overload def __getitem__( self, key: SequenceIndexer | PositionalIndexerTuple, - ) -> Self: - ... + ) -> Self: ... def __getitem__(self, key: PositionalIndexer2D) -> Self | DTScalarOrNaT: """ @@ -498,20 +496,16 @@ def astype(self, dtype, copy: bool = True): return np.asarray(self, dtype=dtype) @overload - def view(self) -> Self: - ... + def view(self) -> Self: ... @overload - def view(self, dtype: Literal["M8[ns]"]) -> DatetimeArray: - ... + def view(self, dtype: Literal["M8[ns]"]) -> DatetimeArray: ... @overload - def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray: - ... + def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray: ... @overload - def view(self, dtype: Dtype | None = ...) -> ArrayLike: - ... + def view(self, dtype: Dtype | None = ...) -> ArrayLike: ... 
# pylint: disable-next=useless-parent-delegation def view(self, dtype: Dtype | None = None) -> ArrayLike: @@ -2527,13 +2521,11 @@ def ensure_arraylike_for_datetimelike( @overload -def validate_periods(periods: None) -> None: - ... +def validate_periods(periods: None) -> None: ... @overload -def validate_periods(periods: int | float) -> int: - ... +def validate_periods(periods: int | float) -> int: ... def validate_periods(periods: int | float | None) -> int | None: diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 4ef5c04461ce9..931f19a7901bd 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -103,13 +103,11 @@ @overload -def tz_to_dtype(tz: tzinfo, unit: str = ...) -> DatetimeTZDtype: - ... +def tz_to_dtype(tz: tzinfo, unit: str = ...) -> DatetimeTZDtype: ... @overload -def tz_to_dtype(tz: None, unit: str = ...) -> np.dtype[np.datetime64]: - ... +def tz_to_dtype(tz: None, unit: str = ...) -> np.dtype[np.datetime64]: ... def tz_to_dtype( diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 5e7e7e949169b..1ea32584403ba 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -706,12 +706,10 @@ def __len__(self) -> int: return len(self._left) @overload - def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA: - ... + def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA: ... @overload - def __getitem__(self, key: SequenceIndexer) -> Self: - ... + def __getitem__(self, key: SequenceIndexer) -> Self: ... def __getitem__(self, key: PositionalIndexer) -> Self | IntervalOrNA: key = check_array_indexer(self, key) diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index cf9ba3c3dbad5..108202f5e510b 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -175,12 +175,10 @@ def dtype(self) -> BaseMaskedDtype: raise AbstractMethodError(self) @overload - def __getitem__(self, item: ScalarIndexer) -> Any: - ... + def __getitem__(self, item: ScalarIndexer) -> Any: ... @overload - def __getitem__(self, item: SequenceIndexer) -> Self: - ... + def __getitem__(self, item: SequenceIndexer) -> Self: ... def __getitem__(self, item: PositionalIndexer) -> Self | Any: item = check_array_indexer(self, item) @@ -535,16 +533,13 @@ def tolist(self) -> list: return self.to_numpy(dtype=dtype, na_value=libmissing.NA).tolist() @overload - def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: - ... + def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: ... @overload - def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: - ... + def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: ... @overload - def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: - ... + def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: ... def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: dtype = pandas_dtype(dtype) @@ -1342,14 +1337,12 @@ def map(self, mapper, na_action=None): @overload def any( self, *, skipna: Literal[True] = ..., axis: AxisInt | None = ..., **kwargs - ) -> np.bool_: - ... + ) -> np.bool_: ... @overload def any( self, *, skipna: bool, axis: AxisInt | None = ..., **kwargs - ) -> np.bool_ | NAType: - ... + ) -> np.bool_ | NAType: ... 
def any( self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs @@ -1437,14 +1430,12 @@ def any( @overload def all( self, *, skipna: Literal[True] = ..., axis: AxisInt | None = ..., **kwargs - ) -> np.bool_: - ... + ) -> np.bool_: ... @overload def all( self, *, skipna: bool, axis: AxisInt | None = ..., **kwargs - ) -> np.bool_ | NAType: - ... + ) -> np.bool_ | NAType: ... def all( self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 73cc8e4345d3c..d05f857f46179 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -1124,13 +1124,11 @@ def period_array( @overload -def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT: - ... +def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT: ... @overload -def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset: - ... +def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset: ... def validate_dtype_freq( diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py index 6608fcce2cd62..58199701647d1 100644 --- a/pandas/core/arrays/sparse/accessor.py +++ b/pandas/core/arrays/sparse/accessor.py @@ -1,4 +1,5 @@ """Sparse accessor""" + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 9b1d4d70ee32e..8d94662ab4303 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -1,6 +1,7 @@ """ SparseArray data structure """ + from __future__ import annotations from collections import abc @@ -930,15 +931,13 @@ def value_counts(self, dropna: bool = True) -> Series: # Indexing # -------- @overload - def __getitem__(self, key: ScalarIndexer) -> Any: - ... + def __getitem__(self, key: ScalarIndexer) -> Any: ... @overload def __getitem__( self, key: SequenceIndexer | tuple[int | ellipsis, ...], - ) -> Self: - ... + ) -> Self: ... def __getitem__( self, @@ -1916,13 +1915,11 @@ def _make_sparse( @overload -def make_sparse_index(length: int, indices, kind: Literal["block"]) -> BlockIndex: - ... +def make_sparse_index(length: int, indices, kind: Literal["block"]) -> BlockIndex: ... @overload -def make_sparse_index(length: int, indices, kind: Literal["integer"]) -> IntIndex: - ... +def make_sparse_index(length: int, indices, kind: Literal["integer"]) -> IntIndex: ... def make_sparse_index(length: int, indices, kind: SparseIndexKind) -> SparseIndex: diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py index 31e09c923d933..cc9fd2d5fb8b0 100644 --- a/pandas/core/arrays/sparse/scipy_sparse.py +++ b/pandas/core/arrays/sparse/scipy_sparse.py @@ -3,6 +3,7 @@ Currently only includes to_coo helpers. """ + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/core/base.py b/pandas/core/base.py index 4556b9ab4d4c9..33b37319675ae 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1319,8 +1319,7 @@ def searchsorted( # type: ignore[overload-overlap] value: ScalarLike_co, side: Literal["left", "right"] = ..., sorter: NumpySorter = ..., - ) -> np.intp: - ... + ) -> np.intp: ... @overload def searchsorted( @@ -1328,8 +1327,7 @@ def searchsorted( value: npt.ArrayLike | ExtensionArray, side: Literal["left", "right"] = ..., sorter: NumpySorter = ..., - ) -> npt.NDArray[np.intp]: - ... + ) -> npt.NDArray[np.intp]: ... 
@doc(_shared_docs["searchsorted"], klass="Index") def searchsorted( diff --git a/pandas/core/common.py b/pandas/core/common.py index 5f37f3de578e8..77e986a26fbe9 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -3,6 +3,7 @@ Note: pandas.core.common is *not* part of the public API. """ + from __future__ import annotations import builtins @@ -227,8 +228,7 @@ def asarray_tuplesafe( @overload -def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike: - ... +def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike: ... def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLike: @@ -422,15 +422,13 @@ def standardize_mapping(into): @overload -def random_state(state: np.random.Generator) -> np.random.Generator: - ... +def random_state(state: np.random.Generator) -> np.random.Generator: ... @overload def random_state( state: int | np.ndarray | np.random.BitGenerator | np.random.RandomState | None, -) -> np.random.RandomState: - ... +) -> np.random.RandomState: ... def random_state(state: RandomState | None = None): @@ -477,8 +475,7 @@ def pipe( func: Callable[Concatenate[_T, P], T], *args: P.args, **kwargs: P.kwargs, -) -> T: - ... +) -> T: ... @overload @@ -487,8 +484,7 @@ def pipe( func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any, -) -> T: - ... +) -> T: ... def pipe( diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 2a48bb280a35f..c5562fb0284b7 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -1,6 +1,7 @@ """ Core eval alignment algorithms. """ + from __future__ import annotations from functools import ( diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py index a3a05a9d75c6e..5db05ebe33efd 100644 --- a/pandas/core/computation/engines.py +++ b/pandas/core/computation/engines.py @@ -1,6 +1,7 @@ """ Engine classes for :func:`~pandas.eval` """ + from __future__ import annotations import abc diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index 6c234b40d27e6..c949cfd1bc657 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -1,6 +1,7 @@ """ Top level ``eval`` module. """ + from __future__ import annotations import tokenize diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index f0aa7363d2644..a8123a898b4fe 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -1,6 +1,7 @@ """ :func:`~pandas.eval` parsers. 
""" + from __future__ import annotations import ast diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 17a68478196da..e2acd9a2c97c2 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -5,6 +5,7 @@ Offer fast expression evaluation through numexpr """ + from __future__ import annotations import operator diff --git a/pandas/core/computation/parsing.py b/pandas/core/computation/parsing.py index 4cfa0f2baffd5..8fbf8936d31ef 100644 --- a/pandas/core/computation/parsing.py +++ b/pandas/core/computation/parsing.py @@ -1,6 +1,7 @@ """ :func:`~pandas.eval` source string parsing functions """ + from __future__ import annotations from io import StringIO diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index cec8a89abc0b2..39511048abf49 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -1,4 +1,5 @@ -""" manage PyTables query interface via Expressions """ +"""manage PyTables query interface via Expressions""" + from __future__ import annotations import ast diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py index 7e553ca448218..7b31e03e58b4b 100644 --- a/pandas/core/computation/scope.py +++ b/pandas/core/computation/scope.py @@ -1,6 +1,7 @@ """ Module for scope operations """ + from __future__ import annotations from collections import ChainMap diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 38cc5a9ab10e6..d9a8b4dfd95fd 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -9,6 +9,7 @@ module is imported, register them here rather than in the module. """ + from __future__ import annotations import os diff --git a/pandas/core/construction.py b/pandas/core/construction.py index af2aea11dcf6d..e6d99ab773db9 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -4,6 +4,7 @@ These should not depend on core.internals. """ + from __future__ import annotations from collections.abc import Sequence @@ -402,15 +403,13 @@ def array( @overload def extract_array( obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ... -) -> ArrayLike: - ... +) -> ArrayLike: ... @overload def extract_array( obj: T, extract_numpy: bool = ..., extract_range: bool = ... -) -> T | ArrayLike: - ... +) -> T | ArrayLike: ... def extract_array( diff --git a/pandas/core/dtypes/astype.py b/pandas/core/dtypes/astype.py index f5579082c679b..bdb16aa202297 100644 --- a/pandas/core/dtypes/astype.py +++ b/pandas/core/dtypes/astype.py @@ -2,6 +2,7 @@ Functions for implementing 'astype' methods according to pandas conventions, particularly ones that differ from numpy. """ + from __future__ import annotations import inspect @@ -42,15 +43,13 @@ @overload def _astype_nansafe( arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ... -) -> np.ndarray: - ... +) -> np.ndarray: ... @overload def _astype_nansafe( arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ... -) -> ExtensionArray: - ... +) -> ExtensionArray: ... def _astype_nansafe( diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index 41407704dfc8a..2f8e59cd6e89c 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -1,6 +1,7 @@ """ Extend pandas with custom array types. """ + from __future__ import annotations from typing import ( @@ -97,8 +98,7 @@ class property**. >>> class ExtensionDtype: ... def __from_arrow__( ... 
self, array: pyarrow.Array | pyarrow.ChunkedArray - ... ) -> ExtensionArray: - ... ... + ... ) -> ExtensionArray: ... This class does not inherit from 'abc.ABCMeta' for performance reasons. Methods and properties required by the interface raise @@ -528,22 +528,18 @@ def register(self, dtype: type_t[ExtensionDtype]) -> None: self.dtypes.append(dtype) @overload - def find(self, dtype: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]: - ... + def find(self, dtype: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]: ... @overload - def find(self, dtype: ExtensionDtypeT) -> ExtensionDtypeT: - ... + def find(self, dtype: ExtensionDtypeT) -> ExtensionDtypeT: ... @overload - def find(self, dtype: str) -> ExtensionDtype | None: - ... + def find(self, dtype: str) -> ExtensionDtype | None: ... @overload def find( self, dtype: npt.DTypeLike - ) -> type_t[ExtensionDtype] | ExtensionDtype | None: - ... + ) -> type_t[ExtensionDtype] | ExtensionDtype | None: ... def find( self, dtype: type_t[ExtensionDtype] | ExtensionDtype | npt.DTypeLike diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 01b7d500179bf..a130983337f64 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -247,13 +247,15 @@ def _disallow_mismatched_datetimelike(value, dtype: DtypeObj) -> None: @overload -def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray: - ... +def maybe_downcast_to_dtype( + result: np.ndarray, dtype: str | np.dtype +) -> np.ndarray: ... @overload -def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike: - ... +def maybe_downcast_to_dtype( + result: ExtensionArray, dtype: str | np.dtype +) -> ArrayLike: ... def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike: @@ -317,15 +319,13 @@ def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLi @overload def maybe_downcast_numeric( result: np.ndarray, dtype: np.dtype, do_round: bool = False -) -> np.ndarray: - ... +) -> np.ndarray: ... @overload def maybe_downcast_numeric( result: ExtensionArray, dtype: DtypeObj, do_round: bool = False -) -> ArrayLike: - ... +) -> ArrayLike: ... def maybe_downcast_numeric( @@ -513,13 +513,11 @@ def _maybe_cast_to_extension_array( @overload -def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype: - ... +def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype: ... @overload -def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype: - ... +def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype: ... def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj: @@ -1418,18 +1416,15 @@ def np_find_common_type(*dtypes: np.dtype) -> np.dtype: @overload -def find_common_type(types: list[np.dtype]) -> np.dtype: - ... +def find_common_type(types: list[np.dtype]) -> np.dtype: ... @overload -def find_common_type(types: list[ExtensionDtype]) -> DtypeObj: - ... +def find_common_type(types: list[ExtensionDtype]) -> DtypeObj: ... @overload -def find_common_type(types: list[DtypeObj]) -> DtypeObj: - ... +def find_common_type(types: list[DtypeObj]) -> DtypeObj: ... def find_common_type(types): diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 15c51b98aea0b..aa621fea6c39a 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1,6 +1,7 @@ """ Common type operations. 
""" + from __future__ import annotations from typing import ( diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 7d5e88b502a00..f702d5a60e86f 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -1,6 +1,7 @@ """ Utility functions related to concat. """ + from __future__ import annotations from typing import ( diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 27b9c0dec2796..2bb2556c88204 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -1,6 +1,7 @@ """ Define extension dtypes. """ + from __future__ import annotations from datetime import ( diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py index 8abde2ab7010f..8d3d86217dedf 100644 --- a/pandas/core/dtypes/generic.py +++ b/pandas/core/dtypes/generic.py @@ -1,4 +1,5 @@ -""" define generic base classes for pandas objects """ +"""define generic base classes for pandas objects""" + from __future__ import annotations from typing import ( diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index 02189dd10e5b8..f042911b53d2b 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -1,4 +1,4 @@ -""" basic inference routines """ +"""basic inference routines""" from __future__ import annotations diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 97efb5db9baa9..f127c736e745a 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -1,6 +1,7 @@ """ missing types & inference """ + from __future__ import annotations from decimal import Decimal @@ -68,31 +69,28 @@ @overload -def isna(obj: Scalar | Pattern | NAType | NaTType) -> bool: - ... +def isna(obj: Scalar | Pattern | NAType | NaTType) -> bool: ... @overload def isna( obj: ArrayLike | Index | list, -) -> npt.NDArray[np.bool_]: - ... +) -> npt.NDArray[np.bool_]: ... @overload -def isna(obj: NDFrameT) -> NDFrameT: - ... +def isna(obj: NDFrameT) -> NDFrameT: ... # handle unions @overload -def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: - ... +def isna( + obj: NDFrameT | ArrayLike | Index | list, +) -> NDFrameT | npt.NDArray[np.bool_]: ... @overload -def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: - ... +def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: @@ -285,31 +283,28 @@ def _isna_recarray_dtype(values: np.rec.recarray) -> npt.NDArray[np.bool_]: @overload -def notna(obj: Scalar | Pattern | NAType | NaTType) -> bool: - ... +def notna(obj: Scalar | Pattern | NAType | NaTType) -> bool: ... @overload def notna( obj: ArrayLike | Index | list, -) -> npt.NDArray[np.bool_]: - ... +) -> npt.NDArray[np.bool_]: ... @overload -def notna(obj: NDFrameT) -> NDFrameT: - ... +def notna(obj: NDFrameT) -> NDFrameT: ... # handle unions @overload -def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: - ... +def notna( + obj: NDFrameT | ArrayLike | Index | list, +) -> NDFrameT | npt.NDArray[np.bool_]: ... @overload -def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: - ... +def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... 
def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3ab40c1aeb64b..25501ff245e46 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8,6 +8,7 @@ alignment and a host of useful data manipulation methods having to do with the labeling information """ + from __future__ import annotations import collections @@ -1216,8 +1217,7 @@ def to_string( min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., - ) -> str: - ... + ) -> str: ... @overload def to_string( @@ -1242,8 +1242,7 @@ def to_string( min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., - ) -> None: - ... + ) -> None: ... @Substitution( header_type="bool or list of str", @@ -1573,12 +1572,10 @@ def __len__(self) -> int: return len(self.index) @overload - def dot(self, other: Series) -> Series: - ... + def dot(self, other: Series) -> Series: ... @overload - def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: - ... + def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ @@ -1699,12 +1696,10 @@ def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: raise TypeError(f"unsupported type: {type(other)}") @overload - def __matmul__(self, other: Series) -> Series: - ... + def __matmul__(self, other: Series) -> Series: ... @overload - def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: - ... + def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ @@ -1930,8 +1925,7 @@ def to_dict( *, into: type[MutableMappingT] | MutableMappingT, index: bool = ..., - ) -> MutableMappingT: - ... + ) -> MutableMappingT: ... @overload def to_dict( @@ -1940,8 +1934,7 @@ def to_dict( *, into: type[MutableMappingT] | MutableMappingT, index: bool = ..., - ) -> list[MutableMappingT]: - ... + ) -> list[MutableMappingT]: ... @overload def to_dict( @@ -1950,8 +1943,7 @@ def to_dict( *, into: type[dict] = ..., index: bool = ..., - ) -> dict: - ... + ) -> dict: ... @overload def to_dict( @@ -1960,8 +1952,7 @@ def to_dict( *, into: type[dict] = ..., index: bool = ..., - ) -> list[dict]: - ... + ) -> list[dict]: ... # error: Incompatible default for argument "into" (default has type "type # [dict[Any, Any]]", argument has type "type[MutableMappingT] | MutableMappingT") @@ -2697,8 +2688,7 @@ def to_markdown( index: bool = ..., storage_options: StorageOptions | None = ..., **kwargs, - ) -> str: - ... + ) -> str: ... @overload def to_markdown( @@ -2709,8 +2699,7 @@ def to_markdown( index: bool = ..., storage_options: StorageOptions | None = ..., **kwargs, - ) -> None: - ... + ) -> None: ... @overload def to_markdown( @@ -2721,8 +2710,7 @@ def to_markdown( index: bool = ..., storage_options: StorageOptions | None = ..., **kwargs, - ) -> str | None: - ... + ) -> str | None: ... @doc( Series.to_markdown, @@ -2785,8 +2773,7 @@ def to_parquet( partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, - ) -> bytes: - ... + ) -> bytes: ... @overload def to_parquet( @@ -2799,8 +2786,7 @@ def to_parquet( partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, - ) -> None: - ... + ) -> None: ... 
@doc(storage_options=_shared_docs["storage_options"]) def to_parquet( @@ -2913,8 +2899,7 @@ def to_orc( engine: Literal["pyarrow"] = ..., index: bool | None = ..., engine_kwargs: dict[str, Any] | None = ..., - ) -> bytes: - ... + ) -> bytes: ... @overload def to_orc( @@ -2924,8 +2909,7 @@ def to_orc( engine: Literal["pyarrow"] = ..., index: bool | None = ..., engine_kwargs: dict[str, Any] | None = ..., - ) -> None: - ... + ) -> None: ... @overload def to_orc( @@ -2935,8 +2919,7 @@ def to_orc( engine: Literal["pyarrow"] = ..., index: bool | None = ..., engine_kwargs: dict[str, Any] | None = ..., - ) -> bytes | None: - ... + ) -> bytes | None: ... def to_orc( self, @@ -3053,8 +3036,7 @@ def to_html( table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., - ) -> None: - ... + ) -> None: ... @overload def to_html( @@ -3083,8 +3065,7 @@ def to_html( table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., - ) -> str: - ... + ) -> str: ... @Substitution( header_type="bool", @@ -3225,8 +3206,7 @@ def to_xml( stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = ..., compression: CompressionOptions = ..., storage_options: StorageOptions | None = ..., - ) -> str: - ... + ) -> str: ... @overload def to_xml( @@ -3248,8 +3228,7 @@ def to_xml( stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = ..., compression: CompressionOptions = ..., storage_options: StorageOptions | None = ..., - ) -> None: - ... + ) -> None: ... @doc( storage_options=_shared_docs["storage_options"], @@ -4384,16 +4363,17 @@ def _get_item(self, item: Hashable) -> Series: # Unsorted @overload - def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: - ... + def query( + self, expr: str, *, inplace: Literal[False] = ..., **kwargs + ) -> DataFrame: ... @overload - def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: - ... + def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... @overload - def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: - ... + def query( + self, expr: str, *, inplace: bool = ..., **kwargs + ) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ @@ -4554,12 +4534,10 @@ def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | No return result @overload - def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: - ... + def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... @overload - def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: - ... + def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ @@ -5114,8 +5092,7 @@ def drop( level: Level = ..., inplace: Literal[True], errors: IgnoreRaise = ..., - ) -> None: - ... + ) -> None: ... @overload def drop( @@ -5128,8 +5105,7 @@ def drop( level: Level = ..., inplace: Literal[False] = ..., errors: IgnoreRaise = ..., - ) -> DataFrame: - ... + ) -> DataFrame: ... @overload def drop( @@ -5142,8 +5118,7 @@ def drop( level: Level = ..., inplace: bool = ..., errors: IgnoreRaise = ..., - ) -> DataFrame | None: - ... + ) -> DataFrame | None: ... def drop( self, @@ -5325,8 +5300,7 @@ def rename( inplace: Literal[True], level: Level = ..., errors: IgnoreRaise = ..., - ) -> None: - ... + ) -> None: ... 
@overload def rename( @@ -5340,8 +5314,7 @@ def rename( inplace: Literal[False] = ..., level: Level = ..., errors: IgnoreRaise = ..., - ) -> DataFrame: - ... + ) -> DataFrame: ... @overload def rename( @@ -5355,8 +5328,7 @@ def rename( inplace: bool = ..., level: Level = ..., errors: IgnoreRaise = ..., - ) -> DataFrame | None: - ... + ) -> DataFrame | None: ... def rename( self, @@ -5549,14 +5521,12 @@ def pop(self, item: Hashable) -> Series: @overload def _replace_columnwise( self, mapping: dict[Hashable, tuple[Any, Any]], inplace: Literal[True], regex - ) -> None: - ... + ) -> None: ... @overload def _replace_columnwise( self, mapping: dict[Hashable, tuple[Any, Any]], inplace: Literal[False], regex - ) -> Self: - ... + ) -> Self: ... def _replace_columnwise( self, mapping: dict[Hashable, tuple[Any, Any]], inplace: bool, regex @@ -5710,8 +5680,7 @@ def set_index( append: bool = ..., inplace: Literal[False] = ..., verify_integrity: bool = ..., - ) -> DataFrame: - ... + ) -> DataFrame: ... @overload def set_index( @@ -5722,8 +5691,7 @@ def set_index( append: bool = ..., inplace: Literal[True], verify_integrity: bool = ..., - ) -> None: - ... + ) -> None: ... def set_index( self, @@ -5943,8 +5911,7 @@ def reset_index( col_fill: Hashable = ..., allow_duplicates: bool | lib.NoDefault = ..., names: Hashable | Sequence[Hashable] | None = None, - ) -> DataFrame: - ... + ) -> DataFrame: ... @overload def reset_index( @@ -5957,8 +5924,7 @@ def reset_index( col_fill: Hashable = ..., allow_duplicates: bool | lib.NoDefault = ..., names: Hashable | Sequence[Hashable] | None = None, - ) -> None: - ... + ) -> None: ... @overload def reset_index( @@ -5971,8 +5937,7 @@ def reset_index( col_fill: Hashable = ..., allow_duplicates: bool | lib.NoDefault = ..., names: Hashable | Sequence[Hashable] | None = None, - ) -> DataFrame | None: - ... + ) -> DataFrame | None: ... def reset_index( self, @@ -6258,8 +6223,7 @@ def dropna( subset: IndexLabel = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., - ) -> DataFrame: - ... + ) -> DataFrame: ... @overload def dropna( @@ -6271,8 +6235,7 @@ def dropna( subset: IndexLabel = ..., inplace: Literal[True], ignore_index: bool = ..., - ) -> None: - ... + ) -> None: ... def dropna( self, @@ -6445,8 +6408,7 @@ def drop_duplicates( keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ..., - ) -> None: - ... + ) -> None: ... @overload def drop_duplicates( @@ -6456,8 +6418,7 @@ def drop_duplicates( keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., - ) -> DataFrame: - ... + ) -> DataFrame: ... @overload def drop_duplicates( @@ -6467,8 +6428,7 @@ def drop_duplicates( keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ..., - ) -> DataFrame | None: - ... + ) -> DataFrame | None: ... def drop_duplicates( self, @@ -6727,8 +6687,7 @@ def sort_values( na_position: NaPosition = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., - ) -> DataFrame: - ... + ) -> DataFrame: ... @overload def sort_values( @@ -6742,8 +6701,7 @@ def sort_values( na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., - ) -> None: - ... + ) -> None: ... def sort_values( self, @@ -7023,8 +6981,7 @@ def sort_index( sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., - ) -> None: - ... + ) -> None: ... @overload def sort_index( @@ -7039,8 +6996,7 @@ def sort_index( sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., - ) -> DataFrame: - ... + ) -> DataFrame: ... 
@overload def sort_index( @@ -7055,8 +7011,7 @@ def sort_index( sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., - ) -> DataFrame | None: - ... + ) -> DataFrame | None: ... def sort_index( self, @@ -11356,8 +11311,7 @@ def any( bool_only: bool = ..., skipna: bool = ..., **kwargs, - ) -> Series: - ... + ) -> Series: ... @overload def any( @@ -11367,8 +11321,7 @@ def any( bool_only: bool = ..., skipna: bool = ..., **kwargs, - ) -> bool: - ... + ) -> bool: ... @overload def any( @@ -11378,8 +11331,7 @@ def any( bool_only: bool = ..., skipna: bool = ..., **kwargs, - ) -> Series | bool: - ... + ) -> Series | bool: ... @doc(make_doc("any", ndim=2)) def any( @@ -11405,8 +11357,7 @@ def all( bool_only: bool = ..., skipna: bool = ..., **kwargs, - ) -> Series: - ... + ) -> Series: ... @overload def all( @@ -11416,8 +11367,7 @@ def all( bool_only: bool = ..., skipna: bool = ..., **kwargs, - ) -> bool: - ... + ) -> bool: ... @overload def all( @@ -11427,8 +11377,7 @@ def all( bool_only: bool = ..., skipna: bool = ..., **kwargs, - ) -> Series | bool: - ... + ) -> Series | bool: ... @doc(make_doc("all", ndim=2)) def all( @@ -11454,8 +11403,7 @@ def min( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Series: - ... + ) -> Series: ... @overload def min( @@ -11465,8 +11413,7 @@ def min( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Any: - ... + ) -> Any: ... @overload def min( @@ -11476,8 +11423,7 @@ def min( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Series | Any: - ... + ) -> Series | Any: ... @doc(make_doc("min", ndim=2)) def min( @@ -11503,8 +11449,7 @@ def max( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Series: - ... + ) -> Series: ... @overload def max( @@ -11514,8 +11459,7 @@ def max( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Any: - ... + ) -> Any: ... @overload def max( @@ -11525,8 +11469,7 @@ def max( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Series | Any: - ... + ) -> Series | Any: ... @doc(make_doc("max", ndim=2)) def max( @@ -11592,8 +11535,7 @@ def mean( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Series: - ... + ) -> Series: ... @overload def mean( @@ -11603,8 +11545,7 @@ def mean( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Any: - ... + ) -> Any: ... @overload def mean( @@ -11614,8 +11555,7 @@ def mean( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Series | Any: - ... + ) -> Series | Any: ... @doc(make_doc("mean", ndim=2)) def mean( @@ -11641,8 +11581,7 @@ def median( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Series: - ... + ) -> Series: ... @overload def median( @@ -11652,8 +11591,7 @@ def median( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Any: - ... + ) -> Any: ... @overload def median( @@ -11663,8 +11601,7 @@ def median( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Series | Any: - ... + ) -> Series | Any: ... @doc(make_doc("median", ndim=2)) def median( @@ -11691,8 +11628,7 @@ def sem( ddof: int = ..., numeric_only: bool = ..., **kwargs, - ) -> Series: - ... + ) -> Series: ... @overload def sem( @@ -11703,8 +11639,7 @@ def sem( ddof: int = ..., numeric_only: bool = ..., **kwargs, - ) -> Any: - ... + ) -> Any: ... @overload def sem( @@ -11715,8 +11650,7 @@ def sem( ddof: int = ..., numeric_only: bool = ..., **kwargs, - ) -> Series | Any: - ... + ) -> Series | Any: ... 
@doc(make_doc("sem", ndim=2)) def sem( @@ -11744,8 +11678,7 @@ def var( ddof: int = ..., numeric_only: bool = ..., **kwargs, - ) -> Series: - ... + ) -> Series: ... @overload def var( @@ -11756,8 +11689,7 @@ def var( ddof: int = ..., numeric_only: bool = ..., **kwargs, - ) -> Any: - ... + ) -> Any: ... @overload def var( @@ -11768,8 +11700,7 @@ def var( ddof: int = ..., numeric_only: bool = ..., **kwargs, - ) -> Series | Any: - ... + ) -> Series | Any: ... @doc(make_doc("var", ndim=2)) def var( @@ -11797,8 +11728,7 @@ def std( ddof: int = ..., numeric_only: bool = ..., **kwargs, - ) -> Series: - ... + ) -> Series: ... @overload def std( @@ -11809,8 +11739,7 @@ def std( ddof: int = ..., numeric_only: bool = ..., **kwargs, - ) -> Any: - ... + ) -> Any: ... @overload def std( @@ -11821,8 +11750,7 @@ def std( ddof: int = ..., numeric_only: bool = ..., **kwargs, - ) -> Series | Any: - ... + ) -> Series | Any: ... @doc(make_doc("std", ndim=2)) def std( @@ -11849,8 +11777,7 @@ def skew( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Series: - ... + ) -> Series: ... @overload def skew( @@ -11860,8 +11787,7 @@ def skew( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Any: - ... + ) -> Any: ... @overload def skew( @@ -11871,8 +11797,7 @@ def skew( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Series | Any: - ... + ) -> Series | Any: ... @doc(make_doc("skew", ndim=2)) def skew( @@ -11898,8 +11823,7 @@ def kurt( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Series: - ... + ) -> Series: ... @overload def kurt( @@ -11909,8 +11833,7 @@ def kurt( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Any: - ... + ) -> Any: ... @overload def kurt( @@ -11920,8 +11843,7 @@ def kurt( skipna: bool = ..., numeric_only: bool = ..., **kwargs, - ) -> Series | Any: - ... + ) -> Series | Any: ... @doc(make_doc("kurt", ndim=2)) def kurt( @@ -12187,8 +12109,7 @@ def quantile( numeric_only: bool = ..., interpolation: QuantileInterpolation = ..., method: Literal["single", "table"] = ..., - ) -> Series: - ... + ) -> Series: ... @overload def quantile( @@ -12198,8 +12119,7 @@ def quantile( numeric_only: bool = ..., interpolation: QuantileInterpolation = ..., method: Literal["single", "table"] = ..., - ) -> Series | DataFrame: - ... + ) -> Series | DataFrame: ... @overload def quantile( @@ -12209,8 +12129,7 @@ def quantile( numeric_only: bool = ..., interpolation: QuantileInterpolation = ..., method: Literal["single", "table"] = ..., - ) -> Series | DataFrame: - ... + ) -> Series | DataFrame: ... def quantile( self, @@ -12841,9 +12760,9 @@ def values(self) -> np.ndarray: def _from_nested_dict( data: Mapping[HashableT, Mapping[HashableT2, T]], ) -> collections.defaultdict[HashableT2, dict[HashableT, T]]: - new_data: collections.defaultdict[ - HashableT2, dict[HashableT, T] - ] = collections.defaultdict(dict) + new_data: collections.defaultdict[HashableT2, dict[HashableT, T]] = ( + collections.defaultdict(dict) + ) for index, s in data.items(): for col, v in s.items(): new_data[col][index] = v diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bfbe257911d0a..5c8842162007d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -723,16 +723,15 @@ def set_axis( return self._set_axis_nocheck(labels, axis, inplace=False) @overload - def _set_axis_nocheck(self, labels, axis: Axis, inplace: Literal[False]) -> Self: - ... + def _set_axis_nocheck( + self, labels, axis: Axis, inplace: Literal[False] + ) -> Self: ... 
@overload - def _set_axis_nocheck(self, labels, axis: Axis, inplace: Literal[True]) -> None: - ... + def _set_axis_nocheck(self, labels, axis: Axis, inplace: Literal[True]) -> None: ... @overload - def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool) -> Self | None: - ... + def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool) -> Self | None: ... @final def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool) -> Self | None: @@ -953,8 +952,7 @@ def _rename( inplace: Literal[False] = ..., level: Level | None = ..., errors: str = ..., - ) -> Self: - ... + ) -> Self: ... @overload def _rename( @@ -968,8 +966,7 @@ def _rename( inplace: Literal[True], level: Level | None = ..., errors: str = ..., - ) -> None: - ... + ) -> None: ... @overload def _rename( @@ -983,8 +980,7 @@ def _rename( inplace: bool, level: Level | None = ..., errors: str = ..., - ) -> Self | None: - ... + ) -> Self | None: ... @final def _rename( @@ -1067,8 +1063,7 @@ def rename_axis( axis: Axis = ..., copy: bool | None = ..., inplace: Literal[False] = ..., - ) -> Self: - ... + ) -> Self: ... @overload def rename_axis( @@ -1080,8 +1075,7 @@ def rename_axis( axis: Axis = ..., copy: bool | None = ..., inplace: Literal[True], - ) -> None: - ... + ) -> None: ... @overload def rename_axis( @@ -1093,8 +1087,7 @@ def rename_axis( axis: Axis = ..., copy: bool | None = ..., inplace: bool = ..., - ) -> Self | None: - ... + ) -> Self | None: ... def rename_axis( self, @@ -1266,16 +1259,17 @@ class name @overload def _set_axis_name( self, name, axis: Axis = ..., *, inplace: Literal[False] = ... - ) -> Self: - ... + ) -> Self: ... @overload - def _set_axis_name(self, name, axis: Axis = ..., *, inplace: Literal[True]) -> None: - ... + def _set_axis_name( + self, name, axis: Axis = ..., *, inplace: Literal[True] + ) -> None: ... @overload - def _set_axis_name(self, name, axis: Axis = ..., *, inplace: bool) -> Self | None: - ... + def _set_axis_name( + self, name, axis: Axis = ..., *, inplace: bool + ) -> Self | None: ... @final def _set_axis_name( @@ -3200,8 +3194,7 @@ def to_latex( caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., - ) -> str: - ... + ) -> str: ... @overload def to_latex( @@ -3228,8 +3221,7 @@ def to_latex( caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., - ) -> None: - ... + ) -> None: ... @final def to_latex( @@ -3610,8 +3602,7 @@ def to_csv( decimal: str = ..., errors: OpenFileErrors = ..., storage_options: StorageOptions = ..., - ) -> str: - ... + ) -> str: ... @overload def to_csv( @@ -3638,8 +3629,7 @@ def to_csv( decimal: str = ..., errors: OpenFileErrors = ..., storage_options: StorageOptions = ..., - ) -> None: - ... + ) -> None: ... @final @doc( @@ -4403,8 +4393,7 @@ def drop( level: Level | None = ..., inplace: Literal[True], errors: IgnoreRaise = ..., - ) -> None: - ... + ) -> None: ... @overload def drop( @@ -4417,8 +4406,7 @@ def drop( level: Level | None = ..., inplace: Literal[False] = ..., errors: IgnoreRaise = ..., - ) -> Self: - ... + ) -> Self: ... @overload def drop( @@ -4431,8 +4419,7 @@ def drop( level: Level | None = ..., inplace: bool = ..., errors: IgnoreRaise = ..., - ) -> Self | None: - ... + ) -> Self | None: ... def drop( self, @@ -4726,8 +4713,7 @@ def sort_values( na_position: NaPosition = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., - ) -> Self: - ... + ) -> Self: ... 
@overload def sort_values( @@ -4740,8 +4726,7 @@ def sort_values( na_position: NaPosition = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., - ) -> None: - ... + ) -> None: ... @overload def sort_values( @@ -4754,8 +4739,7 @@ def sort_values( na_position: NaPosition = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., - ) -> Self | None: - ... + ) -> Self | None: ... def sort_values( self, @@ -4925,8 +4909,7 @@ def sort_index( sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., - ) -> None: - ... + ) -> None: ... @overload def sort_index( @@ -4941,8 +4924,7 @@ def sort_index( sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., - ) -> Self: - ... + ) -> Self: ... @overload def sort_index( @@ -4957,8 +4939,7 @@ def sort_index( sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., - ) -> Self | None: - ... + ) -> Self | None: ... def sort_index( self, @@ -5822,8 +5803,7 @@ def pipe( func: Callable[Concatenate[Self, P], T], *args: P.args, **kwargs: P.kwargs, - ) -> T: - ... + ) -> T: ... @overload def pipe( @@ -5831,8 +5811,7 @@ def pipe( func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any, - ) -> T: - ... + ) -> T: ... @final @doc(klass=_shared_doc_kwargs["klass"]) @@ -6773,8 +6752,7 @@ def fillna( axis: Axis | None = ..., inplace: Literal[False] = ..., limit: int | None = ..., - ) -> Self: - ... + ) -> Self: ... @overload def fillna( @@ -6785,8 +6763,7 @@ def fillna( axis: Axis | None = ..., inplace: Literal[True], limit: int | None = ..., - ) -> None: - ... + ) -> None: ... @overload def fillna( @@ -6797,8 +6774,7 @@ def fillna( axis: Axis | None = ..., inplace: bool = ..., limit: int | None = ..., - ) -> Self | None: - ... + ) -> Self | None: ... @final @doc( @@ -7066,8 +7042,7 @@ def ffill( inplace: Literal[False] = ..., limit: None | int = ..., limit_area: Literal["inside", "outside"] | None = ..., - ) -> Self: - ... + ) -> Self: ... @overload def ffill( @@ -7077,8 +7052,7 @@ def ffill( inplace: Literal[True], limit: None | int = ..., limit_area: Literal["inside", "outside"] | None = ..., - ) -> None: - ... + ) -> None: ... @overload def ffill( @@ -7088,8 +7062,7 @@ def ffill( inplace: bool = ..., limit: None | int = ..., limit_area: Literal["inside", "outside"] | None = ..., - ) -> Self | None: - ... + ) -> Self | None: ... @final @doc( @@ -7198,8 +7171,7 @@ def bfill( inplace: Literal[False] = ..., limit: None | int = ..., limit_area: Literal["inside", "outside"] | None = ..., - ) -> Self: - ... + ) -> Self: ... @overload def bfill( @@ -7208,8 +7180,7 @@ def bfill( axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., - ) -> None: - ... + ) -> None: ... @overload def bfill( @@ -7219,8 +7190,7 @@ def bfill( inplace: bool = ..., limit: None | int = ..., limit_area: Literal["inside", "outside"] | None = ..., - ) -> Self | None: - ... + ) -> Self | None: ... @final @doc( @@ -7338,8 +7308,7 @@ def replace( limit: int | None = ..., regex: bool = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., - ) -> Self: - ... + ) -> Self: ... @overload def replace( @@ -7351,8 +7320,7 @@ def replace( limit: int | None = ..., regex: bool = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., - ) -> None: - ... + ) -> None: ... @overload def replace( @@ -7364,8 +7332,7 @@ def replace( limit: int | None = ..., regex: bool = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., - ) -> Self | None: - ... + ) -> Self | None: ... 
@final @doc( @@ -7626,8 +7593,7 @@ def interpolate( limit_direction: Literal["forward", "backward", "both"] | None = ..., limit_area: Literal["inside", "outside"] | None = ..., **kwargs, - ) -> Self: - ... + ) -> Self: ... @overload def interpolate( @@ -7640,8 +7606,7 @@ def interpolate( limit_direction: Literal["forward", "backward", "both"] | None = ..., limit_area: Literal["inside", "outside"] | None = ..., **kwargs, - ) -> None: - ... + ) -> None: ... @overload def interpolate( @@ -7654,8 +7619,7 @@ def interpolate( limit_direction: Literal["forward", "backward", "both"] | None = ..., limit_area: Literal["inside", "outside"] | None = ..., **kwargs, - ) -> Self | None: - ... + ) -> Self | None: ... @final def interpolate( @@ -8332,8 +8296,7 @@ def clip( axis: Axis | None = ..., inplace: Literal[False] = ..., **kwargs, - ) -> Self: - ... + ) -> Self: ... @overload def clip( @@ -8344,8 +8307,7 @@ def clip( axis: Axis | None = ..., inplace: Literal[True], **kwargs, - ) -> None: - ... + ) -> None: ... @overload def clip( @@ -8356,8 +8318,7 @@ def clip( axis: Axis | None = ..., inplace: bool = ..., **kwargs, - ) -> Self | None: - ... + ) -> Self | None: ... @final def clip( @@ -9722,8 +9683,7 @@ def _where( inplace: Literal[False] = ..., axis: Axis | None = ..., level=..., - ) -> Self: - ... + ) -> Self: ... @overload def _where( @@ -9734,8 +9694,7 @@ def _where( inplace: Literal[True], axis: Axis | None = ..., level=..., - ) -> None: - ... + ) -> None: ... @overload def _where( @@ -9746,8 +9705,7 @@ def _where( inplace: bool, axis: Axis | None = ..., level=..., - ) -> Self | None: - ... + ) -> Self | None: ... @final def _where( @@ -9909,8 +9867,7 @@ def where( inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., - ) -> Self: - ... + ) -> Self: ... @overload def where( @@ -9921,8 +9878,7 @@ def where( inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., - ) -> None: - ... + ) -> None: ... @overload def where( @@ -9933,8 +9889,7 @@ def where( inplace: bool = ..., axis: Axis | None = ..., level: Level = ..., - ) -> Self | None: - ... + ) -> Self | None: ... @final @doc( @@ -10115,8 +10070,7 @@ def mask( inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., - ) -> Self: - ... + ) -> Self: ... @overload def mask( @@ -10127,8 +10081,7 @@ def mask( inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., - ) -> None: - ... + ) -> None: ... @overload def mask( @@ -10139,8 +10092,7 @@ def mask( inplace: bool = ..., axis: Axis | None = ..., level: Level = ..., - ) -> Self | None: - ... + ) -> Self | None: ... @final @doc( diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py index 8b776dc7a9f79..bad9749b5ecee 100644 --- a/pandas/core/groupby/base.py +++ b/pandas/core/groupby/base.py @@ -1,6 +1,7 @@ """ Provide basic components for groupby. """ + from __future__ import annotations import dataclasses diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index d48592d1a61cb..64f55c1df4309 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -5,6 +5,7 @@ These are user facing as the result of the ``df.groupby(...)`` operations, which here returns a DataFrameGroupBy object. 
""" + from __future__ import annotations from collections import abc diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index c294ab855e003..46831b922d24e 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -6,6 +6,7 @@ class providing the base-class of operations. (defined in pandas.core.groupby.generic) expose these user-facing objects to provide specific functionality. """ + from __future__ import annotations from collections.abc import ( @@ -802,8 +803,7 @@ def pipe( func: Callable[Concatenate[Self, P], T], *args: P.args, **kwargs: P.kwargs, - ) -> T: - ... + ) -> T: ... @overload def pipe( @@ -811,8 +811,7 @@ def pipe( func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any, - ) -> T: - ... + ) -> T: ... @Substitution( klass="GroupBy", diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 1cf6df426f8b7..3040f9c64beff 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -2,6 +2,7 @@ Provide user facing operators for doing the split part of the split-apply-combine paradigm. """ + from __future__ import annotations from typing import ( diff --git a/pandas/core/groupby/numba_.py b/pandas/core/groupby/numba_.py index 3b7a58e87603e..b22fc9248eeca 100644 --- a/pandas/core/groupby/numba_.py +++ b/pandas/core/groupby/numba_.py @@ -1,4 +1,5 @@ """Common utilities for Numba operations with groupby ops""" + from __future__ import annotations import functools diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index fc5747595ad02..acf4c7bebf52d 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -5,6 +5,7 @@ operations, primarily in cython. These classes (BaseGrouper and BinGrouper) are contained *in* the SeriesGroupBy and DataFrameGroupBy objects. """ + from __future__ import annotations import collections diff --git a/pandas/core/indexers/objects.py b/pandas/core/indexers/objects.py index 3dd256e9ce45d..2e6bcda520aba 100644 --- a/pandas/core/indexers/objects.py +++ b/pandas/core/indexers/objects.py @@ -1,4 +1,5 @@ """Indexer objects for computing start/end window bounds for rolling operations""" + from __future__ import annotations from datetime import timedelta diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py index 78dbe3a1ca632..b089be3469d87 100644 --- a/pandas/core/indexers/utils.py +++ b/pandas/core/indexers/utils.py @@ -1,6 +1,7 @@ """ Low-dependency indexing utilities. """ + from __future__ import annotations from typing import ( diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 5881f5e040370..59d6e313a2d93 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -1,6 +1,7 @@ """ datetimelike delegation """ + from __future__ import annotations from typing import ( diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c72c5fa019bd7..052ecbafa686a 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1767,16 +1767,13 @@ def _set_names(self, values, *, level=None) -> None: names = property(fset=_set_names, fget=_get_names) @overload - def set_names(self, names, *, level=..., inplace: Literal[False] = ...) -> Self: - ... + def set_names(self, names, *, level=..., inplace: Literal[False] = ...) -> Self: ... @overload - def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: - ... + def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ... 
@overload - def set_names(self, names, *, level=..., inplace: bool = ...) -> Self | None: - ... + def set_names(self, names, *, level=..., inplace: bool = ...) -> Self | None: ... def set_names(self, names, *, level=None, inplace: bool = False) -> Self | None: """ @@ -1883,12 +1880,10 @@ def set_names(self, names, *, level=None, inplace: bool = False) -> Self | None: return None @overload - def rename(self, name, *, inplace: Literal[False] = ...) -> Self: - ... + def rename(self, name, *, inplace: Literal[False] = ...) -> Self: ... @overload - def rename(self, name, *, inplace: Literal[True]) -> None: - ... + def rename(self, name, *, inplace: Literal[True]) -> None: ... def rename(self, name, *, inplace: bool = False) -> Self | None: """ @@ -4110,8 +4105,7 @@ def join( level: Level = ..., return_indexers: Literal[True], sort: bool = ..., - ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: - ... + ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... @overload def join( @@ -4122,8 +4116,7 @@ def join( level: Level = ..., return_indexers: Literal[False] = ..., sort: bool = ..., - ) -> Index: - ... + ) -> Index: ... @overload def join( @@ -4134,8 +4127,9 @@ def join( level: Level = ..., return_indexers: bool = ..., sort: bool = ..., - ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: - ... + ) -> ( + Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None] + ): ... @final @_maybe_return_indexers @@ -5452,8 +5446,7 @@ def sort_values( ascending: bool = ..., na_position: NaPosition = ..., key: Callable | None = ..., - ) -> Self: - ... + ) -> Self: ... @overload def sort_values( @@ -5463,8 +5456,7 @@ def sort_values( ascending: bool = ..., na_position: NaPosition = ..., key: Callable | None = ..., - ) -> tuple[Self, np.ndarray]: - ... + ) -> tuple[Self, np.ndarray]: ... @overload def sort_values( @@ -5474,8 +5466,7 @@ def sort_values( ascending: bool = ..., na_position: NaPosition = ..., key: Callable | None = ..., - ) -> Self | tuple[Self, np.ndarray]: - ... + ) -> Self | tuple[Self, np.ndarray]: ... def sort_values( self, @@ -5872,20 +5863,17 @@ def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None: @overload def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[True] = ... - ) -> npt.NDArray[np.intp]: - ... + ) -> npt.NDArray[np.intp]: ... @overload def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[False] - ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: - ... + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... @overload def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True - ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: - ... + ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... @final def _get_indexer_non_comparable( diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index a17b585fb1166..7e8d808769bc1 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -1,6 +1,7 @@ """ Base and utility classes for tseries type pandas objects. """ + from __future__ import annotations from abc import ( @@ -148,8 +149,7 @@ def freqstr(self) -> str: @cache_readonly @abstractmethod - def _resolution_obj(self) -> Resolution: - ... + def _resolution_obj(self) -> Resolution: ... 
@cache_readonly @doc(DatetimeLikeArrayMixin.resolution) diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index d6fbeb9043bc6..fc806a3546571 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -1,6 +1,7 @@ """ Shared methods for Index subclasses backed by ExtensionArray. """ + from __future__ import annotations from inspect import signature diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index ea3e848356ab5..36f181110eccd 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1,4 +1,5 @@ -""" define the IntervalIndex """ +"""define the IntervalIndex""" + from __future__ import annotations from operator import ( diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 0781a86e5d57e..24f53f16e1985 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -578,8 +578,7 @@ def sort_values( ascending: bool = ..., na_position: NaPosition = ..., key: Callable | None = ..., - ) -> Self: - ... + ) -> Self: ... @overload def sort_values( @@ -589,8 +588,7 @@ def sort_values( ascending: bool = ..., na_position: NaPosition = ..., key: Callable | None = ..., - ) -> tuple[Self, np.ndarray | RangeIndex]: - ... + ) -> tuple[Self, np.ndarray | RangeIndex]: ... @overload def sort_values( @@ -600,8 +598,7 @@ def sort_values( ascending: bool = ..., na_position: NaPosition = ..., key: Callable | None = ..., - ) -> Self | tuple[Self, np.ndarray | RangeIndex]: - ... + ) -> Self | tuple[Self, np.ndarray | RangeIndex]: ... def sort_values( self, diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index a929687544876..4a4b0ac1444d6 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -1,4 +1,5 @@ -""" implement the TimedeltaIndex """ +"""implement the TimedeltaIndex""" + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/core/interchange/from_dataframe.py b/pandas/core/interchange/from_dataframe.py index b296e6016a1ac..a952887d7eed2 100644 --- a/pandas/core/interchange/from_dataframe.py +++ b/pandas/core/interchange/from_dataframe.py @@ -468,8 +468,7 @@ def set_nulls( col: Column, validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None, allow_modify_inplace: bool = ..., -) -> np.ndarray: - ... +) -> np.ndarray: ... @overload @@ -478,8 +477,7 @@ def set_nulls( col: Column, validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None, allow_modify_inplace: bool = ..., -) -> pd.Series: - ... +) -> pd.Series: ... @overload @@ -488,8 +486,7 @@ def set_nulls( col: Column, validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None, allow_modify_inplace: bool = ..., -) -> np.ndarray | pd.Series: - ... +) -> np.ndarray | pd.Series: ... def set_nulls( diff --git a/pandas/core/internals/api.py b/pandas/core/internals/api.py index b0b3937ca47ea..d6e1e8b38dfe3 100644 --- a/pandas/core/internals/api.py +++ b/pandas/core/internals/api.py @@ -6,6 +6,7 @@ 2) Use only functions exposed here (or in core.internals) """ + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 6bc3556902e80..93f1674fbd328 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -2,6 +2,7 @@ Functions for preparing various inputs passed to the DataFrame or Series constructors before passing them to a BlockManager. 
""" + from __future__ import annotations from collections import abc diff --git a/pandas/core/methods/describe.py b/pandas/core/methods/describe.py index 337b2f7952213..b69c9dbdaf6fd 100644 --- a/pandas/core/methods/describe.py +++ b/pandas/core/methods/describe.py @@ -3,6 +3,7 @@ Method NDFrame.describe() delegates actual execution to function describe_ndframe(). """ + from __future__ import annotations from abc import ( diff --git a/pandas/core/methods/to_dict.py b/pandas/core/methods/to_dict.py index a88cf88ead66e..a5833514a9799 100644 --- a/pandas/core/methods/to_dict.py +++ b/pandas/core/methods/to_dict.py @@ -59,8 +59,7 @@ def to_dict( *, into: type[MutableMappingT] | MutableMappingT, index: bool = ..., -) -> MutableMappingT: - ... +) -> MutableMappingT: ... @overload @@ -70,8 +69,7 @@ def to_dict( *, into: type[MutableMappingT] | MutableMappingT, index: bool = ..., -) -> list[MutableMappingT]: - ... +) -> list[MutableMappingT]: ... @overload @@ -81,8 +79,7 @@ def to_dict( *, into: type[dict] = ..., index: bool = ..., -) -> dict: - ... +) -> dict: ... @overload @@ -92,8 +89,7 @@ def to_dict( *, into: type[dict] = ..., index: bool = ..., -) -> list[dict]: - ... +) -> list[dict]: ... # error: Incompatible default for argument "into" (default has type "type[dict diff --git a/pandas/core/missing.py b/pandas/core/missing.py index cdc2ff6c51b06..3a5bf64520d75 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -1,6 +1,7 @@ """ Routines for filling missing data. """ + from __future__ import annotations from functools import wraps @@ -141,8 +142,7 @@ def clean_fill_method( method: Literal["ffill", "pad", "bfill", "backfill"], *, allow_nearest: Literal[False] = ..., -) -> Literal["pad", "backfill"]: - ... +) -> Literal["pad", "backfill"]: ... @overload @@ -150,8 +150,7 @@ def clean_fill_method( method: Literal["ffill", "pad", "bfill", "backfill", "nearest"], *, allow_nearest: Literal[True], -) -> Literal["pad", "backfill", "nearest"]: - ... +) -> Literal["pad", "backfill", "nearest"]: ... def clean_fill_method( diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index ae889a7fdbc24..34a0bb1f45e2c 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -3,6 +3,7 @@ This is not a public API. """ + from __future__ import annotations from pandas.core.ops.array_ops import ( diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 034a231f04488..810e30d369729 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -2,6 +2,7 @@ Functions for arithmetic and comparison operations on NumPy arrays and ExtensionArrays. """ + from __future__ import annotations import datetime diff --git a/pandas/core/ops/common.py b/pandas/core/ops/common.py index fa085a1f0262b..d19ac6246e1cd 100644 --- a/pandas/core/ops/common.py +++ b/pandas/core/ops/common.py @@ -1,6 +1,7 @@ """ Boilerplate functions used in defining binary operations. """ + from __future__ import annotations from functools import wraps diff --git a/pandas/core/ops/dispatch.py b/pandas/core/ops/dispatch.py index a939fdd3d041e..ebafc432dd89b 100644 --- a/pandas/core/ops/dispatch.py +++ b/pandas/core/ops/dispatch.py @@ -1,6 +1,7 @@ """ Functions for defining unary operations. 
""" + from __future__ import annotations from typing import ( diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py index 5e97d1b67d826..8063a52a02163 100644 --- a/pandas/core/ops/docstrings.py +++ b/pandas/core/ops/docstrings.py @@ -1,6 +1,7 @@ """ Templating for ops docstrings """ + from __future__ import annotations @@ -419,12 +420,12 @@ def make_flex_doc(op_name: str, typ: str) -> str: if reverse_op is not None: _op_descriptions[reverse_op] = _op_descriptions[key].copy() _op_descriptions[reverse_op]["reverse"] = key - _op_descriptions[key][ - "see_also_desc" - ] = f"Reverse of the {_op_descriptions[key]['desc']} operator, {_py_num_ref}" - _op_descriptions[reverse_op][ - "see_also_desc" - ] = f"Element-wise {_op_descriptions[key]['desc']}, {_py_num_ref}" + _op_descriptions[key]["see_also_desc"] = ( + f"Reverse of the {_op_descriptions[key]['desc']} operator, {_py_num_ref}" + ) + _op_descriptions[reverse_op]["see_also_desc"] = ( + f"Element-wise {_op_descriptions[key]['desc']}, {_py_num_ref}" + ) _flex_doc_SERIES = """ Return {desc} of series and other, element-wise (binary operator `{op_name}`). diff --git a/pandas/core/ops/invalid.py b/pandas/core/ops/invalid.py index 8af95de285938..7b3af99ee1a95 100644 --- a/pandas/core/ops/invalid.py +++ b/pandas/core/ops/invalid.py @@ -1,6 +1,7 @@ """ Templates for invalid operations. """ + from __future__ import annotations import operator diff --git a/pandas/core/ops/mask_ops.py b/pandas/core/ops/mask_ops.py index e5d0626ad9119..427ae2fb87e55 100644 --- a/pandas/core/ops/mask_ops.py +++ b/pandas/core/ops/mask_ops.py @@ -1,6 +1,7 @@ """ Ops for masked arrays. """ + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py index 0404da189dfa5..a4e9e5305f74d 100644 --- a/pandas/core/ops/missing.py +++ b/pandas/core/ops/missing.py @@ -21,6 +21,7 @@ 3) divmod behavior consistent with 1) and 2). """ + from __future__ import annotations import operator diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 4c87af9ff14c7..43077e7aeecb4 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -257,8 +257,7 @@ def pipe( func: Callable[Concatenate[Self, P], T], *args: P.args, **kwargs: P.kwargs, - ) -> T: - ... + ) -> T: ... @overload def pipe( @@ -266,8 +265,7 @@ def pipe( func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any, - ) -> T: - ... + ) -> T: ... @final @Substitution( @@ -2355,15 +2353,13 @@ def _set_grouper( @overload def _take_new_index( obj: DataFrame, indexer: npt.NDArray[np.intp], new_index: Index -) -> DataFrame: - ... +) -> DataFrame: ... @overload def _take_new_index( obj: Series, indexer: npt.NDArray[np.intp], new_index: Index -) -> Series: - ... +) -> Series: ... def _take_new_index( diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 88323e5304cc4..8758ba3a475a6 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -1,6 +1,7 @@ """ Concat routines. """ + from __future__ import annotations from collections import abc @@ -80,8 +81,7 @@ def concat( verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., -) -> DataFrame: - ... +) -> DataFrame: ... @overload @@ -97,8 +97,7 @@ def concat( verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., -) -> Series: - ... +) -> Series: ... 
@overload @@ -114,8 +113,7 @@ def concat( verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., -) -> DataFrame | Series: - ... +) -> DataFrame | Series: ... @overload @@ -131,8 +129,7 @@ def concat( verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., -) -> DataFrame: - ... +) -> DataFrame: ... @overload @@ -148,8 +145,7 @@ def concat( verify_integrity: bool = ..., sort: bool = ..., copy: bool | None = ..., -) -> DataFrame | Series: - ... +) -> DataFrame | Series: ... def concat( diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index d54bfec389a38..8ea2ac24e13c8 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1,6 +1,7 @@ """ SQL-style merge routines """ + from __future__ import annotations from collections.abc import ( diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index c770acb638b46..b28010c13d6dd 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -490,15 +490,13 @@ def _unstack_multiple( @overload -def unstack(obj: Series, level, fill_value=..., sort: bool = ...) -> DataFrame: - ... +def unstack(obj: Series, level, fill_value=..., sort: bool = ...) -> DataFrame: ... @overload def unstack( obj: Series | DataFrame, level, fill_value=..., sort: bool = ... -) -> Series | DataFrame: - ... +) -> Series | DataFrame: ... def unstack( diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 82c697306edb2..1499afbde56d3 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -1,6 +1,7 @@ """ Quantilization functions and related stuff """ + from __future__ import annotations from typing import ( diff --git a/pandas/core/roperator.py b/pandas/core/roperator.py index 2f320f4e9c6b9..9ea4bea41cdea 100644 --- a/pandas/core/roperator.py +++ b/pandas/core/roperator.py @@ -2,6 +2,7 @@ Reversed Operations not available in the stdlib operator module. Defining these instead of using lambdas allows us to reference them by name. """ + from __future__ import annotations import operator diff --git a/pandas/core/sample.py b/pandas/core/sample.py index eebbed3512c4e..5b1c4b6a331f5 100644 --- a/pandas/core/sample.py +++ b/pandas/core/sample.py @@ -1,6 +1,7 @@ """ Module containing utilities for NDFrame.sample() and .GroupBy.sample() """ + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/core/series.py b/pandas/core/series.py index d7aed54da9014..699ff413efb91 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1,6 +1,7 @@ """ Data structure for 1-dimensional cross-sectional and time series data """ + from __future__ import annotations from collections.abc import ( @@ -1282,8 +1283,7 @@ def reset_index( name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., - ) -> DataFrame: - ... + ) -> DataFrame: ... @overload def reset_index( @@ -1294,8 +1294,7 @@ def reset_index( name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., - ) -> Series: - ... + ) -> Series: ... @overload def reset_index( @@ -1306,8 +1305,7 @@ def reset_index( name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., - ) -> None: - ... + ) -> None: ... def reset_index( self, @@ -1487,8 +1485,7 @@ def to_string( name=..., max_rows: int | None = ..., min_rows: int | None = ..., - ) -> str: - ... + ) -> str: ... 
@overload def to_string( @@ -1504,8 +1501,7 @@ def to_string( name=..., max_rows: int | None = ..., min_rows: int | None = ..., - ) -> None: - ... + ) -> None: ... @deprecate_nonkeyword_arguments( version="3.0.0", allowed_args=["self", "buf"], name="to_string" @@ -1603,8 +1599,7 @@ def to_markdown( index: bool = ..., storage_options: StorageOptions | None = ..., **kwargs, - ) -> str: - ... + ) -> str: ... @overload def to_markdown( @@ -1615,8 +1610,7 @@ def to_markdown( index: bool = ..., storage_options: StorageOptions | None = ..., **kwargs, - ) -> None: - ... + ) -> None: ... @overload def to_markdown( @@ -1627,8 +1621,7 @@ def to_markdown( index: bool = ..., storage_options: StorageOptions | None = ..., **kwargs, - ) -> str | None: - ... + ) -> str | None: ... @doc( klass=_shared_doc_kwargs["klass"], @@ -1759,12 +1752,10 @@ def keys(self) -> Index: @overload def to_dict( self, *, into: type[MutableMappingT] | MutableMappingT - ) -> MutableMappingT: - ... + ) -> MutableMappingT: ... @overload - def to_dict(self, *, into: type[dict] = ...) -> dict: - ... + def to_dict(self, *, into: type[dict] = ...) -> dict: ... # error: Incompatible default for argument "into" (default has type "type[ # dict[Any, Any]]", argument has type "type[MutableMappingT] | MutableMappingT") @@ -2140,20 +2131,17 @@ def drop_duplicates( keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., - ) -> Series: - ... + ) -> Series: ... @overload def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... - ) -> None: - ... + ) -> None: ... @overload def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... - ) -> Series | None: - ... + ) -> Series | None: ... def drop_duplicates( self, @@ -2539,24 +2527,21 @@ def round(self, decimals: int = 0, *args, **kwargs) -> Series: @overload def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... - ) -> float: - ... + ) -> float: ... @overload def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., - ) -> Series: - ... + ) -> Series: ... @overload def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., - ) -> float | Series: - ... + ) -> float | Series: ... def quantile( self, @@ -3369,8 +3354,7 @@ def sort_values( na_position: NaPosition = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., - ) -> Series: - ... + ) -> Series: ... @overload def sort_values( @@ -3383,8 +3367,7 @@ def sort_values( na_position: NaPosition = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., - ) -> None: - ... + ) -> None: ... @overload def sort_values( @@ -3397,8 +3380,7 @@ def sort_values( na_position: NaPosition = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., - ) -> Series | None: - ... + ) -> Series | None: ... def sort_values( self, @@ -3607,8 +3589,7 @@ def sort_index( sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., - ) -> None: - ... + ) -> None: ... @overload def sort_index( @@ -3623,8 +3604,7 @@ def sort_index( sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., - ) -> Series: - ... + ) -> Series: ... @overload def sort_index( @@ -3639,8 +3619,7 @@ def sort_index( sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., - ) -> Series | None: - ... + ) -> Series | None: ... 
def sort_index( self, @@ -4668,8 +4647,7 @@ def rename( inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., - ) -> None: - ... + ) -> None: ... @overload def rename( @@ -4681,8 +4659,7 @@ def rename( inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., - ) -> Series: - ... + ) -> Series: ... @overload def rename( @@ -4694,8 +4671,7 @@ def rename( inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., - ) -> Series | None: - ... + ) -> Series | None: ... def rename( self, @@ -4874,8 +4850,7 @@ def rename_axis( axis: Axis = ..., copy: bool = ..., inplace: Literal[True], - ) -> None: - ... + ) -> None: ... @overload def rename_axis( @@ -4886,8 +4861,7 @@ def rename_axis( axis: Axis = ..., copy: bool = ..., inplace: Literal[False] = ..., - ) -> Self: - ... + ) -> Self: ... @overload def rename_axis( @@ -4898,8 +4872,7 @@ def rename_axis( axis: Axis = ..., copy: bool = ..., inplace: bool = ..., - ) -> Self | None: - ... + ) -> Self | None: ... def rename_axis( self, @@ -4989,8 +4962,7 @@ def drop( level: Level | None = ..., inplace: Literal[True], errors: IgnoreRaise = ..., - ) -> None: - ... + ) -> None: ... @overload def drop( @@ -5003,8 +4975,7 @@ def drop( level: Level | None = ..., inplace: Literal[False] = ..., errors: IgnoreRaise = ..., - ) -> Series: - ... + ) -> Series: ... @overload def drop( @@ -5017,8 +4988,7 @@ def drop( level: Level | None = ..., inplace: bool = ..., errors: IgnoreRaise = ..., - ) -> Series | None: - ... + ) -> Series | None: ... def drop( self, @@ -5172,20 +5142,17 @@ def info( @overload def _replace_single( self, to_replace, method: str, inplace: Literal[False], limit - ) -> Self: - ... + ) -> Self: ... @overload def _replace_single( self, to_replace, method: str, inplace: Literal[True], limit - ) -> None: - ... + ) -> None: ... @overload def _replace_single( self, to_replace, method: str, inplace: bool, limit - ) -> Self | None: - ... + ) -> Self | None: ... # TODO(3.0): this can be removed once GH#33302 deprecation is enforced def _replace_single( @@ -5591,8 +5558,7 @@ def dropna( inplace: Literal[False] = ..., how: AnyAll | None = ..., ignore_index: bool = ..., - ) -> Series: - ... + ) -> Series: ... @overload def dropna( @@ -5602,8 +5568,7 @@ def dropna( inplace: Literal[True], how: AnyAll | None = ..., ignore_index: bool = ..., - ) -> None: - ... + ) -> None: ... def dropna( self, diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 92ca014e30c1a..7034de365b0c1 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -1,4 +1,5 @@ -""" miscellaneous sorting / groupby utilities """ +"""miscellaneous sorting / groupby utilities""" + from __future__ import annotations from collections import defaultdict diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index c416db4083f9a..b8b1d39d4eb20 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -622,8 +622,7 @@ def to_datetime( unit: str | None = ..., origin=..., cache: bool = ..., -) -> Timestamp: - ... +) -> Timestamp: ... @overload @@ -638,8 +637,7 @@ def to_datetime( unit: str | None = ..., origin=..., cache: bool = ..., -) -> Series: - ... +) -> Series: ... @overload @@ -654,8 +652,7 @@ def to_datetime( unit: str | None = ..., origin=..., cache: bool = ..., -) -> DatetimeIndex: - ... +) -> DatetimeIndex: ... 
def to_datetime( diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index 5f3963c3d405e..409a27ea64488 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -1,6 +1,7 @@ """ timedelta support tools """ + from __future__ import annotations from typing import ( @@ -53,8 +54,7 @@ def to_timedelta( arg: str | float | timedelta, unit: UnitChoices | None = ..., errors: DateTimeErrorChoices = ..., -) -> Timedelta: - ... +) -> Timedelta: ... @overload @@ -62,8 +62,7 @@ def to_timedelta( arg: Series, unit: UnitChoices | None = ..., errors: DateTimeErrorChoices = ..., -) -> Series: - ... +) -> Series: ... @overload @@ -71,8 +70,7 @@ def to_timedelta( arg: list | tuple | range | ArrayLike | Index, unit: UnitChoices | None = ..., errors: DateTimeErrorChoices = ..., -) -> TimedeltaIndex: - ... +) -> TimedeltaIndex: ... def to_timedelta( diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index 4933de3212581..f7e9ff220eded 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -1,6 +1,7 @@ """ data hash pandas / numpy objects """ + from __future__ import annotations import itertools diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py index 4825c9fee24b1..a6079785e7475 100644 --- a/pandas/core/util/numba_.py +++ b/pandas/core/util/numba_.py @@ -1,4 +1,5 @@ """Common utilities for Numba operations""" + from __future__ import annotations import types diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index fc8eddca09c84..004a3555f0212 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -1,4 +1,5 @@ """Common utility functions for rolling operations""" + from __future__ import annotations from collections import defaultdict diff --git a/pandas/core/window/doc.py b/pandas/core/window/doc.py index c3ccb471c973e..cdb670ee218b4 100644 --- a/pandas/core/window/doc.py +++ b/pandas/core/window/doc.py @@ -1,4 +1,5 @@ """Any shareable docstring components for rolling/expanding/ewm""" + from __future__ import annotations from textwrap import dedent diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index ac2c10447dee9..52eb8cf45d170 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -2,6 +2,7 @@ Provide a generic structure to support window functions, similar to how we have a Groupby object. """ + from __future__ import annotations import copy diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index 6d124bec72137..402bbdb872a18 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -1,6 +1,7 @@ """ Expose public exceptions & warnings """ + from __future__ import annotations import ctypes diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index 8e8b22967ea01..aa20ec237e968 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -1,4 +1,5 @@ -""" io on the clipboard """ +"""io on the clipboard""" + from __future__ import annotations from io import StringIO diff --git a/pandas/io/common.py b/pandas/io/common.py index 682780a409a8b..3544883afedd6 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -1,4 +1,5 @@ """Common IO api utilities""" + from __future__ import annotations from abc import ( @@ -176,13 +177,11 @@ def is_url(url: object) -> bool: @overload -def _expand_user(filepath_or_buffer: str) -> str: - ... +def _expand_user(filepath_or_buffer: str) -> str: ... 
@overload -def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT: - ... +def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT: ... def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT: @@ -234,15 +233,15 @@ def validate_header_arg(header: object) -> None: @overload -def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str: - ... +def stringify_path( + filepath_or_buffer: FilePath, convert_file_like: bool = ... +) -> str: ... @overload def stringify_path( filepath_or_buffer: BaseBufferT, convert_file_like: bool = ... -) -> BaseBufferT: - ... +) -> BaseBufferT: ... def stringify_path( @@ -627,8 +626,7 @@ def get_handle( is_text: Literal[False], errors: str | None = ..., storage_options: StorageOptions = ..., -) -> IOHandles[bytes]: - ... +) -> IOHandles[bytes]: ... @overload @@ -642,8 +640,7 @@ def get_handle( is_text: Literal[True] = ..., errors: str | None = ..., storage_options: StorageOptions = ..., -) -> IOHandles[str]: - ... +) -> IOHandles[str]: ... @overload @@ -657,8 +654,7 @@ def get_handle( is_text: bool = ..., errors: str | None = ..., storage_options: StorageOptions = ..., -) -> IOHandles[str] | IOHandles[bytes]: - ... +) -> IOHandles[str] | IOHandles[bytes]: ... @doc(compression_options=_shared_docs["compression_options"] % "path_or_buf") @@ -953,8 +949,7 @@ class _BufferedWriter(BytesIO, ABC): # type: ignore[misc] buffer = BytesIO() @abstractmethod - def write_to_buffer(self) -> None: - ... + def write_to_buffer(self) -> None: ... def close(self) -> None: if self.closed: diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index d77a955e41b00..2977f62b4d3c5 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -406,8 +406,7 @@ def read_excel( skipfooter: int = ..., storage_options: StorageOptions = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., -) -> DataFrame: - ... +) -> DataFrame: ... @overload @@ -445,8 +444,7 @@ def read_excel( skipfooter: int = ..., storage_options: StorageOptions = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., -) -> dict[IntStrT, DataFrame]: - ... +) -> dict[IntStrT, DataFrame]: ... @doc(storage_options=_shared_docs["storage_options"]) @@ -1369,7 +1367,7 @@ def close(self) -> None: b"\x09\x00\x04\x00\x07\x00\x10\x00", # BIFF2 b"\x09\x02\x06\x00\x00\x00\x10\x00", # BIFF3 b"\x09\x04\x06\x00\x00\x00\x10\x00", # BIFF4 - b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", # Compound File Binary + b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1", # Compound File Binary ) ZIP_SIGNATURE = b"PK\x03\x04" PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,))) diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py index bc7dca2d95b6b..cdb22a57399ed 100644 --- a/pandas/io/excel/_odswriter.py +++ b/pandas/io/excel/_odswriter.py @@ -238,12 +238,10 @@ def _make_table_cell(self, cell) -> tuple[object, Any]: ) @overload - def _process_style(self, style: dict[str, Any]) -> str: - ... + def _process_style(self, style: dict[str, Any]) -> str: ... @overload - def _process_style(self, style: None) -> None: - ... + def _process_style(self, style: None) -> None: ... 
def _process_style(self, style: dict[str, Any] | None) -> str | None: """Convert a style dictionary to a OpenDocument style sheet diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py index 95d43f60a22c5..f879f16aa5dc8 100644 --- a/pandas/io/excel/_util.py +++ b/pandas/io/excel/_util.py @@ -161,23 +161,19 @@ def _range2cols(areas: str) -> list[int]: @overload -def maybe_convert_usecols(usecols: str | list[int]) -> list[int]: - ... +def maybe_convert_usecols(usecols: str | list[int]) -> list[int]: ... @overload -def maybe_convert_usecols(usecols: list[str]) -> list[str]: - ... +def maybe_convert_usecols(usecols: list[str]) -> list[str]: ... @overload -def maybe_convert_usecols(usecols: usecols_func) -> usecols_func: - ... +def maybe_convert_usecols(usecols: usecols_func) -> usecols_func: ... @overload -def maybe_convert_usecols(usecols: None) -> None: - ... +def maybe_convert_usecols(usecols: None) -> None: ... def maybe_convert_usecols( @@ -212,13 +208,11 @@ def maybe_convert_usecols( @overload -def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]: - ... +def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]: ... @overload -def validate_freeze_panes(freeze_panes: None) -> Literal[False]: - ... +def validate_freeze_panes(freeze_panes: None) -> Literal[False]: ... def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool: diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 89cb044511a25..b42dbaa579ee7 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -1,4 +1,5 @@ -""" feather-format compat """ +"""feather-format compat""" + from __future__ import annotations from typing import ( diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py index 2a6cbe0762903..99a790388f3f1 100644 --- a/pandas/io/formats/console.py +++ b/pandas/io/formats/console.py @@ -1,6 +1,7 @@ """ Internal module for console introspection """ + from __future__ import annotations from shutil import get_terminal_size diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py index 0c6885d789f15..dc18ef2fcd4fc 100644 --- a/pandas/io/formats/css.py +++ b/pandas/io/formats/css.py @@ -1,6 +1,7 @@ """ Utilities for interpreting CSS from Stylers for formatting non-HTML outputs. """ + from __future__ import annotations import re diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index ee7739df49389..b6c6112b05ab3 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -1,6 +1,7 @@ """ Utilities for conversion to writer-agnostic Excel representation. """ + from __future__ import annotations from collections.abc import ( diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 04d5fcae1a50d..8566751b9f33e 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -2,6 +2,7 @@ Internal module for formatting output data in csv, html, xml, and latex files. This module also applies to display formatting. """ + from __future__ import annotations from collections.abc import ( diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 794ce77b3b45e..adaeed017d7bf 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -1,6 +1,7 @@ """ Module for formatting output data in HTML. 
""" + from __future__ import annotations from textwrap import dedent diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index 1119cb0ba9b9d..b30351e14332d 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -1,6 +1,7 @@ """ Printing tools. """ + from __future__ import annotations from collections.abc import ( diff --git a/pandas/io/formats/string.py b/pandas/io/formats/string.py index cdad388592717..ca41726de08cf 100644 --- a/pandas/io/formats/string.py +++ b/pandas/io/formats/string.py @@ -1,6 +1,7 @@ """ Module for formatting output data in console (to string). """ + from __future__ import annotations from shutil import get_terminal_size diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 08a3edd30c311..7247e11be874e 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1,6 +1,7 @@ """ Module for applying conditional formatting to DataFrames and Series. """ + from __future__ import annotations from contextlib import contextmanager @@ -618,8 +619,7 @@ def to_latex( environment: str | None = ..., encoding: str | None = ..., convert_css: bool = ..., - ) -> None: - ... + ) -> None: ... @overload def to_latex( @@ -641,8 +641,7 @@ def to_latex( environment: str | None = ..., encoding: str | None = ..., convert_css: bool = ..., - ) -> str: - ... + ) -> str: ... def to_latex( self, @@ -1234,8 +1233,7 @@ def to_html( doctype_html: bool = ..., exclude_styles: bool = ..., **kwargs, - ) -> None: - ... + ) -> None: ... @overload def to_html( @@ -1254,8 +1252,7 @@ def to_html( doctype_html: bool = ..., exclude_styles: bool = ..., **kwargs, - ) -> str: - ... + ) -> str: ... @Substitution(buf=buffering_args, encoding=encoding_args) def to_html( @@ -1414,8 +1411,7 @@ def to_string( max_rows: int | None = ..., max_columns: int | None = ..., delimiter: str = ..., - ) -> None: - ... + ) -> None: ... @overload def to_string( @@ -1428,8 +1424,7 @@ def to_string( max_rows: int | None = ..., max_columns: int | None = ..., delimiter: str = ..., - ) -> str: - ... + ) -> str: ... @Substitution(buf=buffering_args, encoding=encoding_args) def to_string( @@ -3629,8 +3624,7 @@ def pipe( func: Callable[Concatenate[Self, P], T], *args: P.args, **kwargs: P.kwargs, - ) -> T: - ... + ) -> T: ... @overload def pipe( @@ -3638,8 +3632,7 @@ def pipe( func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any, - ) -> T: - ... + ) -> T: ... 
def pipe( self, diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index fe03ba519629d..2c93dbe74eace 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -314,9 +314,9 @@ def _translate( max_cols, ) - self.cellstyle_map_columns: DefaultDict[ - tuple[CSSPair, ...], list[str] - ] = defaultdict(list) + self.cellstyle_map_columns: DefaultDict[tuple[CSSPair, ...], list[str]] = ( + defaultdict(list) + ) head = self._translate_header(sparse_cols, max_cols) d.update({"head": head}) @@ -329,9 +329,9 @@ def _translate( self.cellstyle_map: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict( list ) - self.cellstyle_map_index: DefaultDict[ - tuple[CSSPair, ...], list[str] - ] = defaultdict(list) + self.cellstyle_map_index: DefaultDict[tuple[CSSPair, ...], list[str]] = ( + defaultdict(list) + ) body: list = self._translate_body(idx_lengths, max_rows, max_cols) d.update({"body": body}) @@ -776,9 +776,9 @@ def _generate_body_row( ) if self.cell_ids: - header_element[ - "id" - ] = f"{self.css['level']}{c}_{self.css['row']}{r}" # id is given + header_element["id"] = ( + f"{self.css['level']}{c}_{self.css['row']}{r}" # id is given + ) if ( header_element_visible and (r, c) in self.ctx_index diff --git a/pandas/io/formats/xml.py b/pandas/io/formats/xml.py index e55561902d4d3..702430642a597 100644 --- a/pandas/io/formats/xml.py +++ b/pandas/io/formats/xml.py @@ -1,6 +1,7 @@ """ :mod:`pandas.io.formats.xml` is a module for formatting data in XML. """ + from __future__ import annotations import codecs diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 0e1426d31f0ee..8f4028c1ead3a 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -107,8 +107,7 @@ def to_json( indent: int = ..., storage_options: StorageOptions = ..., mode: Literal["a", "w"] = ..., -) -> None: - ... +) -> None: ... @overload @@ -127,8 +126,7 @@ def to_json( indent: int = ..., storage_options: StorageOptions = ..., mode: Literal["a", "w"] = ..., -) -> str: - ... +) -> str: ... def to_json( @@ -415,8 +413,7 @@ def read_json( storage_options: StorageOptions = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., engine: JSONEngine = ..., -) -> JsonReader[Literal["frame"]]: - ... +) -> JsonReader[Literal["frame"]]: ... @overload @@ -440,8 +437,7 @@ def read_json( storage_options: StorageOptions = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., engine: JSONEngine = ..., -) -> JsonReader[Literal["series"]]: - ... +) -> JsonReader[Literal["series"]]: ... @overload @@ -465,8 +461,7 @@ def read_json( storage_options: StorageOptions = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., engine: JSONEngine = ..., -) -> Series: - ... +) -> Series: ... @overload @@ -490,8 +485,7 @@ def read_json( storage_options: StorageOptions = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., engine: JSONEngine = ..., -) -> DataFrame: - ... +) -> DataFrame: ... @doc( @@ -922,16 +916,13 @@ def _combine_lines(self, lines) -> str: ) @overload - def read(self: JsonReader[Literal["frame"]]) -> DataFrame: - ... + def read(self: JsonReader[Literal["frame"]]) -> DataFrame: ... @overload - def read(self: JsonReader[Literal["series"]]) -> Series: - ... + def read(self: JsonReader[Literal["series"]]) -> Series: ... @overload - def read(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: - ... + def read(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: ... 
def read(self) -> DataFrame | Series: """ @@ -1016,16 +1007,15 @@ def __iter__(self) -> Self: return self @overload - def __next__(self: JsonReader[Literal["frame"]]) -> DataFrame: - ... + def __next__(self: JsonReader[Literal["frame"]]) -> DataFrame: ... @overload - def __next__(self: JsonReader[Literal["series"]]) -> Series: - ... + def __next__(self: JsonReader[Literal["series"]]) -> Series: ... @overload - def __next__(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: - ... + def __next__( + self: JsonReader[Literal["frame", "series"]], + ) -> DataFrame | Series: ... def __next__(self) -> DataFrame | Series: if self.nrows and self.nrows_seen >= self.nrows: diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index f784004487646..ef717dd9b7ef8 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -53,8 +53,7 @@ def nested_to_record( sep: str = ..., level: int = ..., max_level: int | None = ..., -) -> dict[str, Any]: - ... +) -> dict[str, Any]: ... @overload @@ -64,8 +63,7 @@ def nested_to_record( sep: str = ..., level: int = ..., max_level: int | None = ..., -) -> list[dict[str, Any]]: - ... +) -> list[dict[str, Any]]: ... def nested_to_record( diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py index a3b912dec66fd..d4b412404c308 100644 --- a/pandas/io/json/_table_schema.py +++ b/pandas/io/json/_table_schema.py @@ -3,6 +3,7 @@ https://specs.frictionlessdata.io/table-schema/ """ + from __future__ import annotations from typing import ( diff --git a/pandas/io/orc.py b/pandas/io/orc.py index ed9bc21075e73..9e9a43644f694 100644 --- a/pandas/io/orc.py +++ b/pandas/io/orc.py @@ -1,4 +1,5 @@ -""" orc compat """ +"""orc compat""" + from __future__ import annotations import io diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 8052da25f0368..08983ceed44e5 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -1,4 +1,5 @@ -""" parquet compat """ +"""parquet compat""" + from __future__ import annotations import io diff --git a/pandas/io/parsers/arrow_parser_wrapper.py b/pandas/io/parsers/arrow_parser_wrapper.py index 85b6afeec1ab9..f8263a65ef5c7 100644 --- a/pandas/io/parsers/arrow_parser_wrapper.py +++ b/pandas/io/parsers/arrow_parser_wrapper.py @@ -99,9 +99,9 @@ def _get_pyarrow_options(self) -> None: if callable(on_bad_lines): self.parse_options["invalid_row_handler"] = on_bad_lines elif on_bad_lines == ParserBase.BadLineHandleMethod.ERROR: - self.parse_options[ - "invalid_row_handler" - ] = None # PyArrow raises an exception by default + self.parse_options["invalid_row_handler"] = ( + None # PyArrow raises an exception by default + ) elif on_bad_lines == ParserBase.BadLineHandleMethod.WARN: def handle_warning(invalid_row) -> str: diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 70a90a3e37d62..7b06c6b6b0d39 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -859,16 +859,14 @@ def _do_date_conversions( self, names: Index, data: DataFrame, - ) -> tuple[Sequence[Hashable] | Index, DataFrame]: - ... + ) -> tuple[Sequence[Hashable] | Index, DataFrame]: ... @overload def _do_date_conversions( self, names: Sequence[Hashable], data: Mapping[Hashable, ArrayLike], - ) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]: - ... + ) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]: ... 
@final def _do_date_conversions( @@ -927,14 +925,12 @@ def _evaluate_usecols( self, usecols: Callable[[Hashable], object], names: Iterable[Hashable], - ) -> set[int]: - ... + ) -> set[int]: ... @overload def _evaluate_usecols( self, usecols: SequenceT, names: Iterable[Hashable] - ) -> SequenceT: - ... + ) -> SequenceT: ... @final def _evaluate_usecols( diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 8995faa7ad346..539d9abf84f90 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -3,6 +3,7 @@ GH#48849 provides a convenient way of deprecating keyword arguments """ + from __future__ import annotations from collections import ( @@ -111,9 +112,9 @@ class _read_shared(TypedDict, Generic[HashableT], total=False): skiprows: list[int] | int | Callable[[Hashable], bool] | None skipfooter: int nrows: int | None - na_values: Hashable | Iterable[Hashable] | Mapping[ - Hashable, Iterable[Hashable] - ] | None + na_values: ( + Hashable | Iterable[Hashable] | Mapping[Hashable, Iterable[Hashable]] | None + ) keep_default_na: bool na_filter: bool verbose: bool | lib.NoDefault @@ -568,18 +569,15 @@ class _DeprecationConfig(NamedTuple): @overload -def validate_integer(name: str, val: None, min_val: int = ...) -> None: - ... +def validate_integer(name: str, val: None, min_val: int = ...) -> None: ... @overload -def validate_integer(name: str, val: float, min_val: int = ...) -> int: - ... +def validate_integer(name: str, val: float, min_val: int = ...) -> int: ... @overload -def validate_integer(name: str, val: int | None, min_val: int = ...) -> int | None: - ... +def validate_integer(name: str, val: int | None, min_val: int = ...) -> int | None: ... def validate_integer( @@ -691,8 +689,7 @@ def read_csv( iterator: Literal[True], chunksize: int | None = ..., **kwds: Unpack[_read_shared[HashableT]], -) -> TextFileReader: - ... +) -> TextFileReader: ... @overload @@ -702,8 +699,7 @@ def read_csv( iterator: bool = ..., chunksize: int, **kwds: Unpack[_read_shared[HashableT]], -) -> TextFileReader: - ... +) -> TextFileReader: ... @overload @@ -713,8 +709,7 @@ def read_csv( iterator: Literal[False] = ..., chunksize: None = ..., **kwds: Unpack[_read_shared[HashableT]], -) -> DataFrame: - ... +) -> DataFrame: ... @overload @@ -724,8 +719,7 @@ def read_csv( iterator: bool = ..., chunksize: int | None = ..., **kwds: Unpack[_read_shared[HashableT]], -) -> DataFrame | TextFileReader: - ... +) -> DataFrame | TextFileReader: ... @Appender( @@ -896,8 +890,7 @@ def read_table( iterator: Literal[True], chunksize: int | None = ..., **kwds: Unpack[_read_shared[HashableT]], -) -> TextFileReader: - ... +) -> TextFileReader: ... @overload @@ -907,8 +900,7 @@ def read_table( iterator: bool = ..., chunksize: int, **kwds: Unpack[_read_shared[HashableT]], -) -> TextFileReader: - ... +) -> TextFileReader: ... @overload @@ -918,8 +910,7 @@ def read_table( iterator: Literal[False] = ..., chunksize: None = ..., **kwds: Unpack[_read_shared[HashableT]], -) -> DataFrame: - ... +) -> DataFrame: ... @overload @@ -929,8 +920,7 @@ def read_table( iterator: bool = ..., chunksize: int | None = ..., **kwds: Unpack[_read_shared[HashableT]], -) -> DataFrame | TextFileReader: - ... +) -> DataFrame | TextFileReader: ... @Appender( @@ -1097,8 +1087,7 @@ def read_fwf( iterator: Literal[True], chunksize: int | None = ..., **kwds: Unpack[_read_shared[HashableT]], -) -> TextFileReader: - ... +) -> TextFileReader: ... 
@overload @@ -1111,8 +1100,7 @@ def read_fwf( iterator: bool = ..., chunksize: int, **kwds: Unpack[_read_shared[HashableT]], -) -> TextFileReader: - ... +) -> TextFileReader: ... @overload @@ -1125,8 +1113,7 @@ def read_fwf( iterator: Literal[False] = ..., chunksize: None = ..., **kwds: Unpack[_read_shared[HashableT]], -) -> DataFrame: - ... +) -> DataFrame: ... def read_fwf( diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index d37c77182d3fe..f0441f583bea2 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -1,4 +1,5 @@ -""" pickle compat """ +"""pickle compat""" + from __future__ import annotations import pickle diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 5703f626e3b04..e804c1b751d4a 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2,6 +2,7 @@ High level interface to PyTables for reading and writing pandas data structures to disk """ + from __future__ import annotations from contextlib import suppress diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 275fad2a565bf..49287ddf5ff38 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -13,6 +13,7 @@ Reference for binary data compression: http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm """ + from __future__ import annotations from collections import abc diff --git a/pandas/io/sas/sas_constants.py b/pandas/io/sas/sas_constants.py index 62c17bd03927e..8da7becd76d3b 100644 --- a/pandas/io/sas/sas_constants.py +++ b/pandas/io/sas/sas_constants.py @@ -181,36 +181,36 @@ class SASIndex: subheader_signature_to_index: Final = { - b"\xF7\xF7\xF7\xF7": SASIndex.row_size_index, - b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": SASIndex.row_size_index, - b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": SASIndex.row_size_index, - b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": SASIndex.row_size_index, - b"\xF6\xF6\xF6\xF6": SASIndex.column_size_index, - b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": SASIndex.column_size_index, - b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": SASIndex.column_size_index, - b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": SASIndex.column_size_index, - b"\x00\xFC\xFF\xFF": SASIndex.subheader_counts_index, - b"\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index, - b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.subheader_counts_index, - b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index, - b"\xFD\xFF\xFF\xFF": SASIndex.column_text_index, - b"\xFF\xFF\xFF\xFD": SASIndex.column_text_index, - b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_text_index, - b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": SASIndex.column_text_index, - b"\xFF\xFF\xFF\xFF": SASIndex.column_name_index, - b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_name_index, - b"\xFC\xFF\xFF\xFF": SASIndex.column_attributes_index, - b"\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index, - b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_attributes_index, - b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index, - b"\xFE\xFB\xFF\xFF": SASIndex.format_and_label_index, - b"\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index, - b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.format_and_label_index, - b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index, - b"\xFE\xFF\xFF\xFF": SASIndex.column_list_index, - b"\xFF\xFF\xFF\xFE": SASIndex.column_list_index, - b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_list_index, - b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": SASIndex.column_list_index, + b"\xf7\xf7\xf7\xf7": SASIndex.row_size_index, + 
b"\x00\x00\x00\x00\xf7\xf7\xf7\xf7": SASIndex.row_size_index, + b"\xf7\xf7\xf7\xf7\x00\x00\x00\x00": SASIndex.row_size_index, + b"\xf7\xf7\xf7\xf7\xff\xff\xfb\xfe": SASIndex.row_size_index, + b"\xf6\xf6\xf6\xf6": SASIndex.column_size_index, + b"\x00\x00\x00\x00\xf6\xf6\xf6\xf6": SASIndex.column_size_index, + b"\xf6\xf6\xf6\xf6\x00\x00\x00\x00": SASIndex.column_size_index, + b"\xf6\xf6\xf6\xf6\xff\xff\xfb\xfe": SASIndex.column_size_index, + b"\x00\xfc\xff\xff": SASIndex.subheader_counts_index, + b"\xff\xff\xfc\x00": SASIndex.subheader_counts_index, + b"\x00\xfc\xff\xff\xff\xff\xff\xff": SASIndex.subheader_counts_index, + b"\xff\xff\xff\xff\xff\xff\xfc\x00": SASIndex.subheader_counts_index, + b"\xfd\xff\xff\xff": SASIndex.column_text_index, + b"\xff\xff\xff\xfd": SASIndex.column_text_index, + b"\xfd\xff\xff\xff\xff\xff\xff\xff": SASIndex.column_text_index, + b"\xff\xff\xff\xff\xff\xff\xff\xfd": SASIndex.column_text_index, + b"\xff\xff\xff\xff": SASIndex.column_name_index, + b"\xff\xff\xff\xff\xff\xff\xff\xff": SASIndex.column_name_index, + b"\xfc\xff\xff\xff": SASIndex.column_attributes_index, + b"\xff\xff\xff\xfc": SASIndex.column_attributes_index, + b"\xfc\xff\xff\xff\xff\xff\xff\xff": SASIndex.column_attributes_index, + b"\xff\xff\xff\xff\xff\xff\xff\xfc": SASIndex.column_attributes_index, + b"\xfe\xfb\xff\xff": SASIndex.format_and_label_index, + b"\xff\xff\xfb\xfe": SASIndex.format_and_label_index, + b"\xfe\xfb\xff\xff\xff\xff\xff\xff": SASIndex.format_and_label_index, + b"\xff\xff\xff\xff\xff\xff\xfb\xfe": SASIndex.format_and_label_index, + b"\xfe\xff\xff\xff": SASIndex.column_list_index, + b"\xff\xff\xff\xfe": SASIndex.column_list_index, + b"\xfe\xff\xff\xff\xff\xff\xff\xff": SASIndex.column_list_index, + b"\xff\xff\xff\xff\xff\xff\xff\xfe": SASIndex.column_list_index, } diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index 11b2ed0ee7316..adba9bf117a8e 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -7,6 +7,7 @@ https://support.sas.com/content/dam/SAS/support/en/technical-papers/record-layout-of-a-sas-version-5-or-6-data-set-in-sas-transport-xport-format.pdf """ + from __future__ import annotations from collections import abc diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index ca5a75057fd34..f14943d1e0fce 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -1,6 +1,7 @@ """ Read SAS sas7bdat or xport files. """ + from __future__ import annotations from abc import ( @@ -38,12 +39,10 @@ class ReaderBase(ABC): """ @abstractmethod - def read(self, nrows: int | None = None) -> DataFrame: - ... + def read(self, nrows: int | None = None) -> DataFrame: ... @abstractmethod - def close(self) -> None: - ... + def close(self) -> None: ... def __enter__(self) -> Self: return self @@ -67,8 +66,7 @@ def read_sas( chunksize: int = ..., iterator: bool = ..., compression: CompressionOptions = ..., -) -> ReaderBase: - ... +) -> ReaderBase: ... @overload @@ -81,8 +79,7 @@ def read_sas( chunksize: None = ..., iterator: bool = ..., compression: CompressionOptions = ..., -) -> DataFrame | ReaderBase: - ... +) -> DataFrame | ReaderBase: ... 
@doc(decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer") diff --git a/pandas/io/sql.py b/pandas/io/sql.py index c0d69472598f1..b80487abbc4ab 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -242,8 +242,7 @@ def read_sql_table( columns: list[str] | None = ..., chunksize: None = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., -) -> DataFrame: - ... +) -> DataFrame: ... @overload @@ -257,8 +256,7 @@ def read_sql_table( columns: list[str] | None = ..., chunksize: int = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., -) -> Iterator[DataFrame]: - ... +) -> Iterator[DataFrame]: ... def read_sql_table( @@ -374,8 +372,7 @@ def read_sql_query( chunksize: None = ..., dtype: DtypeArg | None = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., -) -> DataFrame: - ... +) -> DataFrame: ... @overload @@ -389,8 +386,7 @@ def read_sql_query( chunksize: int = ..., dtype: DtypeArg | None = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., -) -> Iterator[DataFrame]: - ... +) -> Iterator[DataFrame]: ... def read_sql_query( @@ -511,8 +507,7 @@ def read_sql( chunksize: None = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., dtype: DtypeArg | None = None, -) -> DataFrame: - ... +) -> DataFrame: ... @overload @@ -527,8 +522,7 @@ def read_sql( chunksize: int = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., dtype: DtypeArg | None = None, -) -> Iterator[DataFrame]: - ... +) -> Iterator[DataFrame]: ... def read_sql( diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 37ea940b3938a..c3101683b9962 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -9,6 +9,7 @@ You can find more information on http://presbrey.mit.edu/PyDTA and https://www.statsmodels.org/devel/ """ + from __future__ import annotations from collections import abc diff --git a/pandas/plotting/__init__.py b/pandas/plotting/__init__.py index 55c861e384d67..c7a4c1eacfcae 100644 --- a/pandas/plotting/__init__.py +++ b/pandas/plotting/__init__.py @@ -55,6 +55,7 @@ For the discussion about the API see https://github.com/pandas-dev/pandas/issues/26747. """ + from pandas.plotting._core import ( PlotAccessor, boxplot, diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py index 45a077a6151cf..d725d53bd21ec 100644 --- a/pandas/plotting/_matplotlib/style.py +++ b/pandas/plotting/_matplotlib/style.py @@ -35,8 +35,7 @@ def get_standard_colors( color_type: str = ..., *, color: dict[str, Color], -) -> dict[str, Color]: - ... +) -> dict[str, Color]: ... @overload @@ -46,8 +45,7 @@ def get_standard_colors( color_type: str = ..., *, color: Color | Sequence[Color] | None = ..., -) -> list[Color]: - ... +) -> list[Color]: ... @overload @@ -57,8 +55,7 @@ def get_standard_colors( color_type: str = ..., *, color: dict[str, Color] | Color | Sequence[Color] | None = ..., -) -> dict[str, Color] | list[Color]: - ... +) -> dict[str, Color] | list[Color]: ... def get_standard_colors( diff --git a/pandas/testing.py b/pandas/testing.py index 841b55df48556..0445fa5b5efc0 100644 --- a/pandas/testing.py +++ b/pandas/testing.py @@ -2,7 +2,6 @@ Public testing utility functions. """ - from pandas._testing import ( assert_extension_array_equal, assert_frame_equal, diff --git a/pandas/tests/arithmetic/common.py b/pandas/tests/arithmetic/common.py index b608df1554154..d7a8b0510b50f 100644 --- a/pandas/tests/arithmetic/common.py +++ b/pandas/tests/arithmetic/common.py @@ -1,6 +1,7 @@ """ Assertion helpers for arithmetic tests. 
""" + import numpy as np import pytest diff --git a/pandas/tests/arrays/interval/test_overlaps.py b/pandas/tests/arrays/interval/test_overlaps.py index 4853bec51106c..5a48cf024ec0d 100644 --- a/pandas/tests/arrays/interval/test_overlaps.py +++ b/pandas/tests/arrays/interval/test_overlaps.py @@ -1,4 +1,5 @@ """Tests for Interval-Interval operations, such as overlaps, contains, etc.""" + import numpy as np import pytest diff --git a/pandas/tests/arrays/masked/test_arrow_compat.py b/pandas/tests/arrays/masked/test_arrow_compat.py index 7a89656bd5aa0..5f73370554473 100644 --- a/pandas/tests/arrays/masked/test_arrow_compat.py +++ b/pandas/tests/arrays/masked/test_arrow_compat.py @@ -161,7 +161,7 @@ def test_pyarrow_array_to_numpy_and_mask(np_dtype_to_arrays): # Add offset to the buffer. offset = b"\x00" * (pa_array.type.bit_width // 8) data_buffer_offset = pa.py_buffer(offset + data_buffer_bytes) - mask_buffer_offset = pa.py_buffer(b"\x0E") + mask_buffer_offset = pa.py_buffer(b"\x0e") pa_array_offset = pa.Array.from_buffers( type=pa_array.type, length=len(pa_array), diff --git a/pandas/tests/arrays/masked_shared.py b/pandas/tests/arrays/masked_shared.py index 3e74402263cf9..545b14af2c98b 100644 --- a/pandas/tests/arrays/masked_shared.py +++ b/pandas/tests/arrays/masked_shared.py @@ -1,6 +1,7 @@ """ Tests shared by MaskedArray subclasses. """ + import numpy as np import pytest diff --git a/pandas/tests/arrays/numpy_/test_numpy.py b/pandas/tests/arrays/numpy_/test_numpy.py index 5112ce262f771..e86eb014465e1 100644 --- a/pandas/tests/arrays/numpy_/test_numpy.py +++ b/pandas/tests/arrays/numpy_/test_numpy.py @@ -2,6 +2,7 @@ Additional tests for NumpyExtensionArray that aren't covered by the interface tests. """ + import numpy as np import pytest diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 4b82d43158b88..597b407a29c94 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -2,6 +2,7 @@ This module tests the functionality of StringArray and ArrowStringArray. Tests for the str accessors are in pandas/tests/strings/test_string_array.py """ + import operator import numpy as np diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 8f0576cc65a27..3f2723d258710 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -1,6 +1,7 @@ """ Tests for DatetimeArray """ + from __future__ import annotations from datetime import timedelta diff --git a/pandas/tests/arrays/test_ndarray_backed.py b/pandas/tests/arrays/test_ndarray_backed.py index 1fe7cc9b03e8a..2af59a03a5b3e 100644 --- a/pandas/tests/arrays/test_ndarray_backed.py +++ b/pandas/tests/arrays/test_ndarray_backed.py @@ -1,6 +1,7 @@ """ Tests for subclasses of NDArrayBackedExtensionArray """ + import numpy as np from pandas import ( diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index d54b15fbe6633..668e7192c0e52 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -3,6 +3,7 @@ related to inference and not otherwise tested in types/test_common.py """ + import collections from collections import namedtuple from collections.abc import Iterator @@ -239,8 +240,7 @@ def test_is_list_like_generic(): # is_list_like was yielding false positives for Generic classes in python 3.11 T = TypeVar("T") - class MyDataFrame(DataFrame, Generic[T]): - ... 
+ class MyDataFrame(DataFrame, Generic[T]): ... tstc = MyDataFrame[int] tst = MyDataFrame[int]({"x": [1, 2, 3]}) diff --git a/pandas/tests/extension/array_with_attr/array.py b/pandas/tests/extension/array_with_attr/array.py index 2789d51ec2ce3..4f65424ece145 100644 --- a/pandas/tests/extension/array_with_attr/array.py +++ b/pandas/tests/extension/array_with_attr/array.py @@ -2,6 +2,7 @@ Test extension array that has custom attribute information (not stored on the dtype). """ + from __future__ import annotations import numbers diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py index 6efaa95aef1b5..cfbc365568403 100644 --- a/pandas/tests/extension/base/__init__.py +++ b/pandas/tests/extension/base/__init__.py @@ -34,6 +34,7 @@ class TestMyDtype(BaseDtypeTests): wherever the test requires it. You're free to implement additional tests. """ + from pandas.tests.extension.base.accumulate import BaseAccumulateTests from pandas.tests.extension.base.casting import BaseCastingTests from pandas.tests.extension.base.constructors import BaseConstructorsTests diff --git a/pandas/tests/extension/base/dim2.py b/pandas/tests/extension/base/dim2.py index 4da9fe8917d55..8c7d8ff491cd3 100644 --- a/pandas/tests/extension/base/dim2.py +++ b/pandas/tests/extension/base/dim2.py @@ -1,6 +1,7 @@ """ Tests for 2D compatibility. """ + import numpy as np import pytest diff --git a/pandas/tests/extension/base/index.py b/pandas/tests/extension/base/index.py index 72c4ebfb5d84a..e7bfebec92287 100644 --- a/pandas/tests/extension/base/index.py +++ b/pandas/tests/extension/base/index.py @@ -1,6 +1,7 @@ """ Tests for Indexes backed by arbitrary ExtensionArrays. """ + import pandas as pd diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index e43b50322bb92..3a4391edc99ef 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -11,6 +11,7 @@ in that case. We *want* the dictionaries to be treated as scalars, so we hack around pandas by using UserDicts. """ + from __future__ import annotations from collections import ( diff --git a/pandas/tests/extension/list/array.py b/pandas/tests/extension/list/array.py index b3bb35c9396f4..da53bdcb4e37e 100644 --- a/pandas/tests/extension/list/array.py +++ b/pandas/tests/extension/list/array.py @@ -3,6 +3,7 @@ The ListArray stores an ndarray of lists. """ + from __future__ import annotations import numbers diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index 5d634c9aeb14f..6c3706881624f 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -10,6 +10,7 @@ classes (if they are relevant for the extension interface for all dtypes), or be added to the array-specific tests in `pandas/tests/arrays/`. """ + from __future__ import annotations from datetime import ( diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index bd4ab5077c6e8..09662f7b793a9 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -13,6 +13,7 @@ be added to the array-specific tests in `pandas/tests/arrays/`. 
""" + import string import numpy as np diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index 6352bf76f96bb..06e85f5c92913 100644 --- a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -13,6 +13,7 @@ be added to the array-specific tests in `pandas/tests/arrays/`. """ + import numpy as np import pytest diff --git a/pandas/tests/extension/test_extension.py b/pandas/tests/extension/test_extension.py index 1ed626cd51080..456f4863b1c31 100644 --- a/pandas/tests/extension/test_extension.py +++ b/pandas/tests/extension/test_extension.py @@ -1,6 +1,7 @@ """ Tests for behavior if an author does *not* implement EA methods. """ + import numpy as np import pytest diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py index 98dd1c5cb615f..6900d6d67f9d9 100644 --- a/pandas/tests/extension/test_interval.py +++ b/pandas/tests/extension/test_interval.py @@ -13,6 +13,7 @@ be added to the array-specific tests in `pandas/tests/arrays/`. """ + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/tests/extension/test_masked.py b/pandas/tests/extension/test_masked.py index 651f783b44d1f..5481e50de10bb 100644 --- a/pandas/tests/extension/test_masked.py +++ b/pandas/tests/extension/test_masked.py @@ -13,6 +13,7 @@ be added to the array-specific tests in `pandas/tests/arrays/`. """ + import warnings import numpy as np diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index 3f54f6cbbba69..ca79c13ed44e4 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -15,6 +15,7 @@ Note: we do not bother with base.BaseIndexTests because NumpyExtensionArray will never be held in an Index. """ + import numpy as np import pytest diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py index 2d1d213322bac..142bad6db4f95 100644 --- a/pandas/tests/extension/test_period.py +++ b/pandas/tests/extension/test_period.py @@ -13,6 +13,7 @@ be added to the array-specific tests in `pandas/tests/arrays/`. """ + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py index 2d5a134f8560a..c09d4d315451f 100644 --- a/pandas/tests/extension/test_string.py +++ b/pandas/tests/extension/test_string.py @@ -13,6 +13,7 @@ be added to the array-specific tests in `pandas/tests/arrays/`. """ + from __future__ import annotations import string diff --git a/pandas/tests/frame/indexing/test_coercion.py b/pandas/tests/frame/indexing/test_coercion.py index ba0d8613b6228..f55605d1ffa12 100644 --- a/pandas/tests/frame/indexing/test_coercion.py +++ b/pandas/tests/frame/indexing/test_coercion.py @@ -4,6 +4,7 @@ For the most part, these should be multi-column DataFrames, otherwise we would share the tests with Series. """ + import numpy as np import pytest diff --git a/pandas/tests/frame/indexing/test_insert.py b/pandas/tests/frame/indexing/test_insert.py index 26eba5f49bd39..b530cb98ef46c 100644 --- a/pandas/tests/frame/indexing/test_insert.py +++ b/pandas/tests/frame/indexing/test_insert.py @@ -3,6 +3,7 @@ confused with tests with "insert" in their names that are really testing __setitem__. 
""" + import numpy as np import pytest diff --git a/pandas/tests/frame/methods/test_first_valid_index.py b/pandas/tests/frame/methods/test_first_valid_index.py index 2e27f1aa71700..5855be2373ae2 100644 --- a/pandas/tests/frame/methods/test_first_valid_index.py +++ b/pandas/tests/frame/methods/test_first_valid_index.py @@ -1,6 +1,7 @@ """ Includes test for last_valid_index. """ + import numpy as np import pytest diff --git a/pandas/tests/frame/methods/test_nlargest.py b/pandas/tests/frame/methods/test_nlargest.py index 8a7b985c98069..7b6a0487c296a 100644 --- a/pandas/tests/frame/methods/test_nlargest.py +++ b/pandas/tests/frame/methods/test_nlargest.py @@ -2,6 +2,7 @@ Note: for naming purposes, most tests are title with as e.g. "test_nlargest_foo" but are implicitly also testing nsmallest_foo. """ + from string import ascii_lowercase import numpy as np diff --git a/pandas/tests/frame/test_npfuncs.py b/pandas/tests/frame/test_npfuncs.py index 6b5c469403130..e9a241202d156 100644 --- a/pandas/tests/frame/test_npfuncs.py +++ b/pandas/tests/frame/test_npfuncs.py @@ -1,6 +1,7 @@ """ Tests for np.foo applied to DataFrame, not necessarily ufuncs. """ + import numpy as np from pandas import ( diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py index 6c108847c2bc6..cfa3cabbc1747 100644 --- a/pandas/tests/generic/test_duplicate_labels.py +++ b/pandas/tests/generic/test_duplicate_labels.py @@ -1,4 +1,5 @@ """Tests dealing with the NDFrame.allows_duplicates.""" + import operator import numpy as np diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py index fd815c85a89b3..f2eecbe86926b 100644 --- a/pandas/tests/generic/test_finalize.py +++ b/pandas/tests/generic/test_finalize.py @@ -1,6 +1,7 @@ """ An exhaustive list of pandas methods exercising NDFrame.__finalize__. 
""" + import operator import re diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index d8f832002dac6..2b9df1b7079da 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -1,6 +1,7 @@ """ test .agg behavior / note that .apply is tested generally in test_groupby.py """ + import datetime import functools from functools import partial diff --git a/pandas/tests/groupby/methods/test_value_counts.py b/pandas/tests/groupby/methods/test_value_counts.py index 24990e64bb51c..a8d359f3206c2 100644 --- a/pandas/tests/groupby/methods/test_value_counts.py +++ b/pandas/tests/groupby/methods/test_value_counts.py @@ -4,7 +4,6 @@ and proper parameter handling """ - import numpy as np import pytest diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 36b5a6f638418..699fffe5d0488 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -1,6 +1,7 @@ """ test where we are determining what we are grouping, or getting groups """ + from datetime import ( date, timedelta, diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index aba3b2f27c633..ea556d043be2d 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -1,6 +1,7 @@ """ test with the TimeGrouper / grouping with datetimes """ + from datetime import ( datetime, timedelta, diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index c9ff4608c6563..e91ca64bb8970 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -1,4 +1,5 @@ -""" test with the .transform """ +"""test with the .transform""" + import numpy as np import pytest diff --git a/pandas/tests/indexes/base_class/test_reshape.py b/pandas/tests/indexes/base_class/test_reshape.py index 814a6a516904b..e17e39a334acc 100644 --- a/pandas/tests/indexes/base_class/test_reshape.py +++ b/pandas/tests/indexes/base_class/test_reshape.py @@ -1,6 +1,7 @@ """ Tests for ndarray-like method on the base Index class """ + import numpy as np import pytest diff --git a/pandas/tests/indexes/categorical/test_formats.py b/pandas/tests/indexes/categorical/test_formats.py index 74e738a543300..491db3a63cc0d 100644 --- a/pandas/tests/indexes/categorical/test_formats.py +++ b/pandas/tests/indexes/categorical/test_formats.py @@ -1,6 +1,7 @@ """ Tests for CategoricalIndex.__repr__ and related methods. 
""" + import pytest from pandas._config import using_pyarrow_string_dtype diff --git a/pandas/tests/indexes/datetimelike_/test_equals.py b/pandas/tests/indexes/datetimelike_/test_equals.py index fc9fbd33d0d28..08134d9f3efb4 100644 --- a/pandas/tests/indexes/datetimelike_/test_equals.py +++ b/pandas/tests/indexes/datetimelike_/test_equals.py @@ -1,6 +1,7 @@ """ Tests shared for DatetimeIndex/TimedeltaIndex/PeriodIndex """ + from datetime import ( datetime, timedelta, diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index bb2c3d921ea1f..173b32b12e2d1 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -1,4 +1,4 @@ -""" test partial slicing on Series/Frame """ +"""test partial slicing on Series/Frame""" from datetime import datetime diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index daa5b346eb4ec..0c8bdbdd2fb22 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -1,6 +1,7 @@ """ Tests for DatetimeIndex timezone-related methods """ + from datetime import ( datetime, timedelta, diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py index 10204cfb78e89..284e219fd20e4 100644 --- a/pandas/tests/indexes/test_any_index.py +++ b/pandas/tests/indexes/test_any_index.py @@ -1,6 +1,7 @@ """ Tests that can be parametrized over _any_ Index object. """ + import re import numpy as np diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index d7ef2d39e8df6..eb0010066a7f6 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -3,6 +3,7 @@ any index subclass except for MultiIndex. Makes use of the `index_flat` fixture defined in pandas/conftest.py. """ + from copy import ( copy, deepcopy, diff --git a/pandas/tests/indexes/test_datetimelike.py b/pandas/tests/indexes/test_datetimelike.py index 330ea50dc1373..7ec73070836b8 100644 --- a/pandas/tests/indexes/test_datetimelike.py +++ b/pandas/tests/indexes/test_datetimelike.py @@ -1,4 +1,4 @@ -""" generic datetimelike tests """ +"""generic datetimelike tests""" import numpy as np import pytest diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py index 2e61340023948..21cb0b8723d59 100644 --- a/pandas/tests/indexes/test_index_new.py +++ b/pandas/tests/indexes/test_index_new.py @@ -1,6 +1,7 @@ """ Tests for the Index constructor conducting inference. """ + from datetime import ( datetime, timedelta, diff --git a/pandas/tests/indexes/test_indexing.py b/pandas/tests/indexes/test_indexing.py index e6716239cca5a..1bbffcee3b671 100644 --- a/pandas/tests/indexes/test_indexing.py +++ b/pandas/tests/indexes/test_indexing.py @@ -14,6 +14,7 @@ The corresponding tests.indexes.[index_type].test_indexing files contain tests for the corresponding methods specific to those Index subclasses. """ + import numpy as np import pytest diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py index 27b54ea66f0ac..9a3471fe526c1 100644 --- a/pandas/tests/indexes/test_setops.py +++ b/pandas/tests/indexes/test_setops.py @@ -2,6 +2,7 @@ The tests in this package are to ensure the proper resultant dtypes of set operations. 
""" + from datetime import datetime import operator diff --git a/pandas/tests/indexes/test_subclass.py b/pandas/tests/indexes/test_subclass.py index c3287e1ddcddc..a8ba8c3090cf2 100644 --- a/pandas/tests/indexes/test_subclass.py +++ b/pandas/tests/indexes/test_subclass.py @@ -1,6 +1,7 @@ """ Tests involving custom Index subclasses """ + import numpy as np from pandas import ( diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index 2af76f69a4300..a33fb1e6979ec 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -1,4 +1,5 @@ -""" common utilities """ +"""common utilities""" + from __future__ import annotations from typing import ( diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 8650a1afb383d..172aa9878caec 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -1,4 +1,4 @@ -""" test positional based indexing with iloc """ +"""test positional based indexing with iloc""" from datetime import datetime import re diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 45ec968714aff..60a3ccf0b7483 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -1,4 +1,4 @@ -""" test fancy indexing & misc """ +"""test fancy indexing & misc""" import array from datetime import datetime diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 9c33d15c01cd6..7112b866018a2 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1,4 +1,5 @@ -""" test label based indexing with loc """ +"""test label based indexing with loc""" + from collections import namedtuple from datetime import ( date, diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py index a51334c03a302..730fe584d7f07 100644 --- a/pandas/tests/indexing/test_scalar.py +++ b/pandas/tests/indexing/test_scalar.py @@ -1,4 +1,5 @@ -""" test scalar indexing, including at and iat """ +"""test scalar indexing, including at and iat""" + from datetime import ( datetime, timedelta, diff --git a/pandas/tests/interchange/test_spec_conformance.py b/pandas/tests/interchange/test_spec_conformance.py index 7c02379c11853..55e42ed2023cd 100644 --- a/pandas/tests/interchange/test_spec_conformance.py +++ b/pandas/tests/interchange/test_spec_conformance.py @@ -2,6 +2,7 @@ A verbatim copy (vendored) of the spec tests. 
Taken from https://github.com/data-apis/dataframe-api """ + import ctypes import math diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index d3ddc13c1497e..508fc47d0920b 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -1257,18 +1257,18 @@ def test_engine_kwargs(self, engine, tmp_excel): } if PY310: - msgs[ - "openpyxl" - ] = "Workbook.__init__() got an unexpected keyword argument 'foo'" - msgs[ - "xlsxwriter" - ] = "Workbook.__init__() got an unexpected keyword argument 'foo'" + msgs["openpyxl"] = ( + "Workbook.__init__() got an unexpected keyword argument 'foo'" + ) + msgs["xlsxwriter"] = ( + "Workbook.__init__() got an unexpected keyword argument 'foo'" + ) # Handle change in error message for openpyxl (write and append mode) if engine == "openpyxl" and not os.path.exists(tmp_excel): - msgs[ - "openpyxl" - ] = r"load_workbook() got an unexpected keyword argument 'foo'" + msgs["openpyxl"] = ( + r"load_workbook() got an unexpected keyword argument 'foo'" + ) with pytest.raises(TypeError, match=re.escape(msgs[engine])): df.to_excel( diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 43e94b8c55589..b12cfc6876a8e 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -2,6 +2,7 @@ Tests for the file pandas.io.formats.format, *not* tests for general formatting of pandas objects. """ + from datetime import datetime from io import StringIO import re diff --git a/pandas/tests/io/formats/test_to_excel.py b/pandas/tests/io/formats/test_to_excel.py index 927a9f4961f6f..3b782713eed6c 100644 --- a/pandas/tests/io/formats/test_to_excel.py +++ b/pandas/tests/io/formats/test_to_excel.py @@ -2,6 +2,7 @@ ExcelFormatter is tested implicitly in pandas/tests/io/excel """ + import string import pytest diff --git a/pandas/tests/io/json/test_deprecated_kwargs.py b/pandas/tests/io/json/test_deprecated_kwargs.py index cc88fc3ba1826..9da682c90a285 100644 --- a/pandas/tests/io/json/test_deprecated_kwargs.py +++ b/pandas/tests/io/json/test_deprecated_kwargs.py @@ -1,6 +1,7 @@ """ Tests for the deprecated keyword arguments for `read_json`. """ + from io import StringIO import pandas as pd diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index b6fa90edbf106..afc9974c75e6a 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -1,4 +1,5 @@ """Tests for Table Schema integration.""" + from collections import OrderedDict from io import StringIO import json diff --git a/pandas/tests/io/parser/common/test_chunksize.py b/pandas/tests/io/parser/common/test_chunksize.py index 9f42cf674b0a7..cdf4d6ae77f91 100644 --- a/pandas/tests/io/parser/common/test_chunksize.py +++ b/pandas/tests/io/parser/common/test_chunksize.py @@ -2,6 +2,7 @@ Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ + from io import StringIO import numpy as np diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index 7ffc49e941c14..485680d9de48c 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -2,6 +2,7 @@ Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. 
""" + from datetime import datetime from inspect import signature from io import StringIO diff --git a/pandas/tests/io/parser/common/test_data_list.py b/pandas/tests/io/parser/common/test_data_list.py index 3b0ff9e08d349..bf9293ddd841d 100644 --- a/pandas/tests/io/parser/common/test_data_list.py +++ b/pandas/tests/io/parser/common/test_data_list.py @@ -2,6 +2,7 @@ Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ + import csv from io import StringIO diff --git a/pandas/tests/io/parser/common/test_decimal.py b/pandas/tests/io/parser/common/test_decimal.py index 4ceca037f589a..eb6c97097e5fb 100644 --- a/pandas/tests/io/parser/common/test_decimal.py +++ b/pandas/tests/io/parser/common/test_decimal.py @@ -2,6 +2,7 @@ Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ + from io import StringIO import pytest diff --git a/pandas/tests/io/parser/common/test_file_buffer_url.py b/pandas/tests/io/parser/common/test_file_buffer_url.py index b03e31c21fc81..c93c80a7bb084 100644 --- a/pandas/tests/io/parser/common/test_file_buffer_url.py +++ b/pandas/tests/io/parser/common/test_file_buffer_url.py @@ -2,6 +2,7 @@ Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ + from io import ( BytesIO, StringIO, diff --git a/pandas/tests/io/parser/common/test_float.py b/pandas/tests/io/parser/common/test_float.py index 6069c23936297..4e0b61577f9e7 100644 --- a/pandas/tests/io/parser/common/test_float.py +++ b/pandas/tests/io/parser/common/test_float.py @@ -2,6 +2,7 @@ Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ + from io import StringIO import numpy as np diff --git a/pandas/tests/io/parser/common/test_index.py b/pandas/tests/io/parser/common/test_index.py index 7cdaac1a284cd..2fcc80f58ae30 100644 --- a/pandas/tests/io/parser/common/test_index.py +++ b/pandas/tests/io/parser/common/test_index.py @@ -2,6 +2,7 @@ Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ + from datetime import datetime from io import StringIO import os diff --git a/pandas/tests/io/parser/common/test_inf.py b/pandas/tests/io/parser/common/test_inf.py index dba952b1f9ebd..657aa3278a442 100644 --- a/pandas/tests/io/parser/common/test_inf.py +++ b/pandas/tests/io/parser/common/test_inf.py @@ -2,6 +2,7 @@ Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ + from io import StringIO import pytest diff --git a/pandas/tests/io/parser/common/test_ints.py b/pandas/tests/io/parser/common/test_ints.py index e77958b0e9acc..9322e8d54f5b8 100644 --- a/pandas/tests/io/parser/common/test_ints.py +++ b/pandas/tests/io/parser/common/test_ints.py @@ -2,6 +2,7 @@ Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ + from io import StringIO import numpy as np diff --git a/pandas/tests/io/parser/common/test_iterator.py b/pandas/tests/io/parser/common/test_iterator.py index a521c84aa007d..091edb67f6e19 100644 --- a/pandas/tests/io/parser/common/test_iterator.py +++ b/pandas/tests/io/parser/common/test_iterator.py @@ -2,6 +2,7 @@ Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. 
""" + from io import StringIO import pytest diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py index f5a724bad4fa2..0827f64dccf46 100644 --- a/pandas/tests/io/parser/common/test_read_errors.py +++ b/pandas/tests/io/parser/common/test_read_errors.py @@ -2,6 +2,7 @@ Tests that work on the Python, C and PyArrow engines but do not have a specific classification into the other test modules. """ + import codecs import csv from io import StringIO @@ -57,9 +58,12 @@ def test_bad_stream_exception(all_parsers, csv_dir_path): msg = "'utf-8' codec can't decode byte" # Stream must be binary UTF8. - with open(path, "rb") as handle, codecs.StreamRecoder( - handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter - ) as stream: + with ( + open(path, "rb") as handle, + codecs.StreamRecoder( + handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter + ) as stream, + ): with pytest.raises(UnicodeDecodeError, match=msg): parser.read_csv(stream) diff --git a/pandas/tests/io/parser/common/test_verbose.py b/pandas/tests/io/parser/common/test_verbose.py index fede54643d2dd..c5490afba1e04 100644 --- a/pandas/tests/io/parser/common/test_verbose.py +++ b/pandas/tests/io/parser/common/test_verbose.py @@ -2,6 +2,7 @@ Tests that work on both the Python and C engines but do not have a specific classification into the other test modules. """ + from io import StringIO import pytest diff --git a/pandas/tests/io/parser/dtypes/test_categorical.py b/pandas/tests/io/parser/dtypes/test_categorical.py index f4aff14a5ce32..15cbac54ff8d9 100644 --- a/pandas/tests/io/parser/dtypes/test_categorical.py +++ b/pandas/tests/io/parser/dtypes/test_categorical.py @@ -2,6 +2,7 @@ Tests dtype specification during parsing for all of the parsers defined in parsers.py """ + from io import StringIO import os diff --git a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py index 70fd0b02cc79d..d45368dece6d2 100644 --- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py +++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py @@ -2,6 +2,7 @@ Tests dtype specification during parsing for all of the parsers defined in parsers.py """ + from collections import defaultdict from io import StringIO diff --git a/pandas/tests/io/parser/dtypes/test_empty.py b/pandas/tests/io/parser/dtypes/test_empty.py index 609c4cbe77fc8..ebc61e7f0ca2b 100644 --- a/pandas/tests/io/parser/dtypes/test_empty.py +++ b/pandas/tests/io/parser/dtypes/test_empty.py @@ -2,6 +2,7 @@ Tests dtype specification during parsing for all of the parsers defined in parsers.py """ + from io import StringIO import numpy as np diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py index 27d7bc0bb6c07..090235c862a2a 100644 --- a/pandas/tests/io/parser/test_c_parser_only.py +++ b/pandas/tests/io/parser/test_c_parser_only.py @@ -4,6 +4,7 @@ these tests out of this module as soon as the Python parser can accept further arguments when parsing. 
""" + from decimal import Decimal from io import ( BytesIO, @@ -509,7 +510,7 @@ def __next__(self): def test_buffer_rd_bytes_bad_unicode(c_parser_only): # see gh-22748 - t = BytesIO(b"\xB0") + t = BytesIO(b"\xb0") t = TextIOWrapper(t, encoding="ascii", errors="surrogateescape") msg = "'utf-8' codec can't encode character" with pytest.raises(UnicodeError, match=msg): diff --git a/pandas/tests/io/parser/test_comment.py b/pandas/tests/io/parser/test_comment.py index abaeeb86476da..ca8df520b171e 100644 --- a/pandas/tests/io/parser/test_comment.py +++ b/pandas/tests/io/parser/test_comment.py @@ -2,6 +2,7 @@ Tests that comments are properly handled during parsing for all of the parsers defined in parsers.py """ + from io import StringIO import numpy as np diff --git a/pandas/tests/io/parser/test_converters.py b/pandas/tests/io/parser/test_converters.py index b6b882b4ec432..7986df62a6b6f 100644 --- a/pandas/tests/io/parser/test_converters.py +++ b/pandas/tests/io/parser/test_converters.py @@ -2,6 +2,7 @@ Tests column conversion functionality during parsing for all of the parsers defined in parsers.py """ + from io import StringIO from dateutil.parser import parse diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py index 31b7e9df1e0ec..5df8c3d27bf84 100644 --- a/pandas/tests/io/parser/test_encoding.py +++ b/pandas/tests/io/parser/test_encoding.py @@ -2,6 +2,7 @@ Tests encoding functionality during parsing for all of the parsers defined in parsers.py """ + from io import ( BytesIO, TextIOWrapper, diff --git a/pandas/tests/io/parser/test_index_col.py b/pandas/tests/io/parser/test_index_col.py index ba15d061b2deb..24d0a7626723e 100644 --- a/pandas/tests/io/parser/test_index_col.py +++ b/pandas/tests/io/parser/test_index_col.py @@ -3,6 +3,7 @@ is properly handled or inferred during parsing for all of the parsers defined in parsers.py """ + from io import StringIO import numpy as np diff --git a/pandas/tests/io/parser/test_mangle_dupes.py b/pandas/tests/io/parser/test_mangle_dupes.py index 1d245f81f027c..61d328138da96 100644 --- a/pandas/tests/io/parser/test_mangle_dupes.py +++ b/pandas/tests/io/parser/test_mangle_dupes.py @@ -3,6 +3,7 @@ CSV engine. In general, the expected result is that they are either thoroughly de-duplicated (if mangling requested) or ignored otherwise. 
""" + from io import StringIO import pytest diff --git a/pandas/tests/io/parser/test_multi_thread.py b/pandas/tests/io/parser/test_multi_thread.py index da9b9bddd30cd..7fac67df44ca2 100644 --- a/pandas/tests/io/parser/test_multi_thread.py +++ b/pandas/tests/io/parser/test_multi_thread.py @@ -2,6 +2,7 @@ Tests multithreading behaviour for reading and parsing files for each parser defined in parsers.py """ + from contextlib import ExitStack from io import BytesIO from multiprocessing.pool import ThreadPool diff --git a/pandas/tests/io/parser/test_na_values.py b/pandas/tests/io/parser/test_na_values.py index 6ebfc8f337c10..ba0e3033321e4 100644 --- a/pandas/tests/io/parser/test_na_values.py +++ b/pandas/tests/io/parser/test_na_values.py @@ -2,6 +2,7 @@ Tests that NA values are properly handled during parsing for all of the parsers defined in parsers.py """ + from io import StringIO import numpy as np diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 9351387dfc337..f63cc3d56bf89 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -2,6 +2,7 @@ Tests parsers ability to read and parse non-local files and hence require a network connection to be read. """ + from io import BytesIO import logging import re diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py index dc3c527e82202..c0ea5936164a1 100644 --- a/pandas/tests/io/parser/test_python_parser_only.py +++ b/pandas/tests/io/parser/test_python_parser_only.py @@ -4,6 +4,7 @@ these tests out of this module as soon as the C parser can accept further arguments when parsing. """ + from __future__ import annotations import csv diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index 1b3d1d41bc1c9..6aeed2377a3aa 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -2,6 +2,7 @@ Tests the TextReader class in parsers.pyx, which is integral to the C engine in parsers.py """ + from io import ( BytesIO, StringIO, diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index 3e52e9b68735d..8d4c28bd61fa1 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -6,6 +6,7 @@ Ultimately, the goal is to remove test cases from this test suite as new feature support is added to the parsers. 
""" + from io import StringIO import os from pathlib import Path diff --git a/pandas/tests/io/parser/usecols/test_parse_dates.py b/pandas/tests/io/parser/usecols/test_parse_dates.py index bc66189ca064e..75efe87c408c0 100644 --- a/pandas/tests/io/parser/usecols/test_parse_dates.py +++ b/pandas/tests/io/parser/usecols/test_parse_dates.py @@ -2,6 +2,7 @@ Tests the usecols functionality during parsing for all of the parsers defined in parsers.py """ + from io import StringIO import pytest diff --git a/pandas/tests/io/parser/usecols/test_strings.py b/pandas/tests/io/parser/usecols/test_strings.py index 0d51c2cb3cdb4..1538bd4e805f7 100644 --- a/pandas/tests/io/parser/usecols/test_strings.py +++ b/pandas/tests/io/parser/usecols/test_strings.py @@ -2,6 +2,7 @@ Tests the usecols functionality during parsing for all of the parsers defined in parsers.py """ + from io import StringIO import pytest diff --git a/pandas/tests/io/parser/usecols/test_usecols_basic.py b/pandas/tests/io/parser/usecols/test_usecols_basic.py index 214070b1ac5f2..d55066d2d70bb 100644 --- a/pandas/tests/io/parser/usecols/test_usecols_basic.py +++ b/pandas/tests/io/parser/usecols/test_usecols_basic.py @@ -2,6 +2,7 @@ Tests the usecols functionality during parsing for all of the parsers defined in parsers.py """ + from io import StringIO import numpy as np diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py index 529d6d789596f..cc61d8bca7de3 100644 --- a/pandas/tests/io/pytables/test_append.py +++ b/pandas/tests/io/pytables/test_append.py @@ -940,8 +940,9 @@ def test_append_to_multiple_dropna_false(setup_path): df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan df = concat([df1, df2], axis=1) - with ensure_clean_store(setup_path) as store, pd.option_context( - "io.hdf.dropna_table", True + with ( + ensure_clean_store(setup_path) as store, + pd.option_context("io.hdf.dropna_table", True), ): # dropna=False shouldn't synchronize row indexes store.append_to_multiple( diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 886bff332a420..f5880d8a894f8 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -1,6 +1,7 @@ """ Tests for the pandas.io.common functionalities """ + import codecs import errno from functools import partial diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index c8b5b690ae118..893728748f276 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -1,4 +1,5 @@ -""" test feather-format compat """ +"""test feather-format compat""" + import numpy as np import pytest diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py index 0ce6a8bf82cd8..a4cf257296b09 100644 --- a/pandas/tests/io/test_gcs.py +++ b/pandas/tests/io/test_gcs.py @@ -115,15 +115,17 @@ def assert_equal_zip_safe(result: bytes, expected: bytes, compression: str): """ if compression == "zip": # Only compare the CRC checksum of the file contents - with zipfile.ZipFile(BytesIO(result)) as exp, zipfile.ZipFile( - BytesIO(expected) - ) as res: + with ( + zipfile.ZipFile(BytesIO(result)) as exp, + zipfile.ZipFile(BytesIO(expected)) as res, + ): for res_info, exp_info in zip(res.infolist(), exp.infolist()): assert res_info.CRC == exp_info.CRC elif compression == "tar": - with tarfile.open(fileobj=BytesIO(result)) as tar_exp, tarfile.open( - fileobj=BytesIO(expected) - ) as tar_res: + with ( + tarfile.open(fileobj=BytesIO(result)) as tar_exp, + tarfile.open(fileobj=BytesIO(expected)) 
as tar_res, + ): for tar_res_info, tar_exp_info in zip( tar_res.getmembers(), tar_exp.getmembers() ): diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 2251fa20f0b63..2c0f19dc74ed2 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -1461,8 +1461,7 @@ def seekable(self): return True # GH 49036 pylint checks for presence of __next__ for iterators - def __next__(self): - ... + def __next__(self): ... def __iter__(self) -> Iterator: # `is_file_like` depends on the presence of diff --git a/pandas/tests/io/test_http_headers.py b/pandas/tests/io/test_http_headers.py index 550637a50c1c4..dfae294a147a2 100644 --- a/pandas/tests/io/test_http_headers.py +++ b/pandas/tests/io/test_http_headers.py @@ -1,6 +1,7 @@ """ Tests for the pandas custom headers in http(s) requests """ + from functools import partial import gzip from io import BytesIO diff --git a/pandas/tests/io/test_orc.py b/pandas/tests/io/test_orc.py index b4a8c713d99ab..de6d46492e916 100644 --- a/pandas/tests/io/test_orc.py +++ b/pandas/tests/io/test_orc.py @@ -1,4 +1,5 @@ -""" test orc compat """ +"""test orc compat""" + import datetime from decimal import Decimal from io import BytesIO diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 3cba7b7da347e..55be48eb572fd 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -1,4 +1,5 @@ -""" test parquet compat """ +"""test parquet compat""" + import datetime from decimal import Decimal from io import BytesIO diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index ed8d4371e0f3a..1420e24858ffb 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -10,6 +10,7 @@ 3. Move the created pickle to "data/legacy_pickle/" directory. 
""" + from __future__ import annotations from array import array diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 42a9e84218a81..9078ca865042d 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -419,7 +419,7 @@ def test_read_write_dta11(self): [(1, 2, 3, 4)], columns=[ "good", - "b\u00E4d", + "b\u00e4d", "8number", "astringwithmorethan32characters______", ], @@ -1368,7 +1368,7 @@ def test_invalid_variable_label_encoding(self, version, mixed_frame): ) def test_write_variable_label_errors(self, mixed_frame): - values = ["\u03A1", "\u0391", "\u039D", "\u0394", "\u0391", "\u03A3"] + values = ["\u03a1", "\u0391", "\u039d", "\u0394", "\u0391", "\u03a3"] variable_labels_utf8 = { "a": "City Rank", diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index 7e0b8dc7282e4..25669ce75953f 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -1,4 +1,5 @@ -""" Test cases for DataFrame.plot """ +"""Test cases for DataFrame.plot""" + from datetime import ( date, datetime, @@ -204,7 +205,7 @@ def test_plot_multiindex_unicode(self): columns=columns, index=index, ) - _check_plot_works(df.plot, title="\u03A3") + _check_plot_works(df.plot, title="\u03a3") @pytest.mark.slow @pytest.mark.parametrize("layout", [None, (-1, 1)]) diff --git a/pandas/tests/plotting/frame/test_frame_color.py b/pandas/tests/plotting/frame/test_frame_color.py index 4f14f1e43cf29..76d3b20aaa2c6 100644 --- a/pandas/tests/plotting/frame/test_frame_color.py +++ b/pandas/tests/plotting/frame/test_frame_color.py @@ -1,4 +1,5 @@ -""" Test cases for DataFrame.plot """ +"""Test cases for DataFrame.plot""" + import re import numpy as np diff --git a/pandas/tests/plotting/frame/test_frame_groupby.py b/pandas/tests/plotting/frame/test_frame_groupby.py index f1924185a3df1..b7e147bbabde5 100644 --- a/pandas/tests/plotting/frame/test_frame_groupby.py +++ b/pandas/tests/plotting/frame/test_frame_groupby.py @@ -1,4 +1,4 @@ -""" Test cases for DataFrame.plot """ +"""Test cases for DataFrame.plot""" import pytest diff --git a/pandas/tests/plotting/frame/test_frame_subplots.py b/pandas/tests/plotting/frame/test_frame_subplots.py index fb34592b288af..16853114d93cd 100644 --- a/pandas/tests/plotting/frame/test_frame_subplots.py +++ b/pandas/tests/plotting/frame/test_frame_subplots.py @@ -1,4 +1,4 @@ -""" Test cases for DataFrame.plot """ +"""Test cases for DataFrame.plot""" import string diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index abafad5b1d7da..2dd45a9abc7a5 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -1,4 +1,4 @@ -""" Test cases for .boxplot method """ +"""Test cases for .boxplot method""" import itertools import string diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 5d44c399ee726..2eb44ef4771e0 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1,4 +1,5 @@ -""" Test cases for time series specific (freq conversion, etc) """ +"""Test cases for time series specific (freq conversion, etc)""" + from datetime import ( date, datetime, diff --git a/pandas/tests/plotting/test_groupby.py b/pandas/tests/plotting/test_groupby.py index 5ebf93510a615..0cb125d822fd1 100644 --- a/pandas/tests/plotting/test_groupby.py +++ b/pandas/tests/plotting/test_groupby.py @@ 
-1,5 +1,4 @@ -""" Test cases for GroupBy.plot """ - +"""Test cases for GroupBy.plot""" import numpy as np import pytest diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 0318abe7bdfac..511c1dd7761d5 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -1,4 +1,5 @@ -""" Test cases for .hist method """ +"""Test cases for .hist method""" + import re import numpy as np diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index cfb657c2a800f..d593ddbbaa0b8 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -1,4 +1,5 @@ -""" Test cases for misc plot functions """ +"""Test cases for misc plot functions""" + import os import numpy as np diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 2b2f2f3b84307..9fbc20e10f5c1 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -1,4 +1,5 @@ -""" Test cases for Series.plot """ +"""Test cases for Series.plot""" + from datetime import datetime from itertools import chain diff --git a/pandas/tests/reductions/test_stat_reductions.py b/pandas/tests/reductions/test_stat_reductions.py index a6aaeba1dc3a8..60fcf8cbc142c 100644 --- a/pandas/tests/reductions/test_stat_reductions.py +++ b/pandas/tests/reductions/test_stat_reductions.py @@ -1,6 +1,7 @@ """ Tests for statistical reductions of 2nd moment or higher: var, skew, kurt, ... """ + import inspect import numpy as np diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py index f3edaffdb315d..96721f11cb2d6 100644 --- a/pandas/tests/scalar/timedelta/test_arithmetic.py +++ b/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -1,6 +1,7 @@ """ Tests for scalar Timedelta arithmetic ops """ + from datetime import ( datetime, timedelta, diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index d4398f66e6f89..06a0f3324c2cf 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -1,4 +1,5 @@ -""" test the scalar Timedelta """ +"""test the scalar Timedelta""" + from datetime import timedelta import sys diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index 44a16e51f2c47..ea970433464fc 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -1,4 +1,4 @@ -""" test the scalar Timestamp """ +"""test the scalar Timestamp""" import calendar from datetime import ( diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py index cb2a35be907cd..8f2ee3ef45075 100644 --- a/pandas/tests/scalar/timestamp/test_timezones.py +++ b/pandas/tests/scalar/timestamp/test_timezones.py @@ -1,6 +1,7 @@ """ Tests for Timestamp timezone-related methods """ + from datetime import datetime from pandas._libs.tslibs import timezones diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index a6e4b4f78e25a..e0ca4bf64ea91 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -1,6 +1,7 @@ """ Also test support for datetime64[ns] in Series / DataFrame """ + from datetime import ( datetime, timedelta, diff --git 
a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py index 01c775e492888..fac543ac450a5 100644 --- a/pandas/tests/series/indexing/test_getitem.py +++ b/pandas/tests/series/indexing/test_getitem.py @@ -1,6 +1,7 @@ """ Series.__getitem__ test classes are organized by the type of key passed. """ + from datetime import ( date, datetime, diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 5c36877e5ac86..a629d18131306 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -1,4 +1,5 @@ -""" test get/set & misc """ +"""test get/set & misc""" + from datetime import timedelta import re diff --git a/pandas/tests/series/methods/test_isna.py b/pandas/tests/series/methods/test_isna.py index 7e324aa86a052..92bf2945cc0d1 100644 --- a/pandas/tests/series/methods/test_isna.py +++ b/pandas/tests/series/methods/test_isna.py @@ -1,6 +1,7 @@ """ We also test Series.notna in this file. """ + import numpy as np from pandas import ( diff --git a/pandas/tests/series/methods/test_item.py b/pandas/tests/series/methods/test_item.py index 8e8c33619d564..e927fa69db358 100644 --- a/pandas/tests/series/methods/test_item.py +++ b/pandas/tests/series/methods/test_item.py @@ -2,6 +2,7 @@ Series.item method, mainly testing that we get python scalars as opposed to numpy scalars. """ + import pytest from pandas import ( diff --git a/pandas/tests/series/methods/test_nlargest.py b/pandas/tests/series/methods/test_nlargest.py index c37f57771e29d..56b7cf42a798d 100644 --- a/pandas/tests/series/methods/test_nlargest.py +++ b/pandas/tests/series/methods/test_nlargest.py @@ -2,6 +2,7 @@ Note: for naming purposes, most tests are title with as e.g. "test_nlargest_foo" but are implicitly also testing nsmallest_foo. 
""" + import numpy as np import pytest diff --git a/pandas/tests/series/methods/test_set_name.py b/pandas/tests/series/methods/test_set_name.py index cbc8ebde7a8ab..137207053c225 100644 --- a/pandas/tests/series/methods/test_set_name.py +++ b/pandas/tests/series/methods/test_set_name.py @@ -14,7 +14,7 @@ def test_set_name(self): def test_set_name_attribute(self): ser = Series([1, 2, 3]) ser2 = Series([1, 2, 3], name="bar") - for name in [7, 7.0, "name", datetime(2001, 1, 1), (1,), "\u05D0"]: + for name in [7, 7.0, "name", datetime(2001, 1, 1), (1,), "\u05d0"]: ser.name = name assert ser.name == name ser2.name = name diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index b00074c04257e..68737e86f0c6a 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1587,7 +1587,7 @@ def test_NaT_cast(self): tm.assert_series_equal(result, expected) def test_constructor_name_hashable(self): - for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05D0"]: + for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05d0"]: for data in [[1, 2, 3], np.ones(3), {"a": 0, "b": 1}]: s = Series(data, name=n) assert s.name == n diff --git a/pandas/tests/series/test_formats.py b/pandas/tests/series/test_formats.py index 1a3b46ec8196a..c001e0f9b028a 100644 --- a/pandas/tests/series/test_formats.py +++ b/pandas/tests/series/test_formats.py @@ -114,13 +114,13 @@ def test_datetime(self, datetime_series): 1, 1.2, "foo", - "\u03B1\u03B2\u03B3", + "\u03b1\u03b2\u03b3", "loooooooooooooooooooooooooooooooooooooooooooooooooooong", ("foo", "bar", "baz"), (1, 2), ("foo", 1, 2.3), - ("\u03B1", "\u03B2", "\u03B3"), - ("\u03B1", "bar"), + ("\u03b1", "\u03b2", "\u03b3"), + ("\u03b1", "bar"), ], ) def test_various_names(self, name, string_series): diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 824e550e0f03b..a4fd29878a2d1 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -1,6 +1,7 @@ """ Testing that we work in the downstream packages """ + import array from functools import partial import subprocess diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 42764c121e3d2..9d93a05cf1761 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -1,4 +1,4 @@ -""" test to_datetime """ +"""test to_datetime""" import calendar from collections import deque diff --git a/pandas/tests/tseries/offsets/common.py b/pandas/tests/tseries/offsets/common.py index efb010addad22..e0838cceb4c7b 100644 --- a/pandas/tests/tseries/offsets/common.py +++ b/pandas/tests/tseries/offsets/common.py @@ -1,6 +1,7 @@ """ Assertion helpers and base class for offsets tests """ + from __future__ import annotations diff --git a/pandas/tests/tseries/offsets/test_business_day.py b/pandas/tests/tseries/offsets/test_business_day.py index 7db1921369023..b1ab5fc64804b 100644 --- a/pandas/tests/tseries/offsets/test_business_day.py +++ b/pandas/tests/tseries/offsets/test_business_day.py @@ -1,6 +1,7 @@ """ Tests for offsets.BDay """ + from __future__ import annotations from datetime import ( diff --git a/pandas/tests/tseries/offsets/test_business_hour.py b/pandas/tests/tseries/offsets/test_business_hour.py index f01406fb50d23..1b488dc9a47d4 100644 --- a/pandas/tests/tseries/offsets/test_business_hour.py +++ b/pandas/tests/tseries/offsets/test_business_hour.py @@ -1,6 +1,7 @@ """ Tests for offsets.BusinessHour """ 
+ from __future__ import annotations from datetime import ( diff --git a/pandas/tests/tseries/offsets/test_business_month.py b/pandas/tests/tseries/offsets/test_business_month.py index a14451e60aa89..3ae2a115d46f7 100644 --- a/pandas/tests/tseries/offsets/test_business_month.py +++ b/pandas/tests/tseries/offsets/test_business_month.py @@ -3,6 +3,7 @@ - BMonthBegin - BMonthEnd """ + from __future__ import annotations from datetime import datetime diff --git a/pandas/tests/tseries/offsets/test_business_quarter.py b/pandas/tests/tseries/offsets/test_business_quarter.py index 3e87ab3e6d397..ab3e55c4989fb 100644 --- a/pandas/tests/tseries/offsets/test_business_quarter.py +++ b/pandas/tests/tseries/offsets/test_business_quarter.py @@ -3,6 +3,7 @@ - BQuarterBegin - BQuarterEnd """ + from __future__ import annotations from datetime import datetime diff --git a/pandas/tests/tseries/offsets/test_business_year.py b/pandas/tests/tseries/offsets/test_business_year.py index 3b7a1025cc19c..cf12b166b30e4 100644 --- a/pandas/tests/tseries/offsets/test_business_year.py +++ b/pandas/tests/tseries/offsets/test_business_year.py @@ -3,6 +3,7 @@ - BYearBegin - BYearEnd """ + from __future__ import annotations from datetime import datetime diff --git a/pandas/tests/tseries/offsets/test_custom_business_day.py b/pandas/tests/tseries/offsets/test_custom_business_day.py index 519fb712d0415..d2f309dd3f33c 100644 --- a/pandas/tests/tseries/offsets/test_custom_business_day.py +++ b/pandas/tests/tseries/offsets/test_custom_business_day.py @@ -1,6 +1,7 @@ """ Tests for offsets.CustomBusinessDay / CDay """ + from datetime import ( datetime, timedelta, diff --git a/pandas/tests/tseries/offsets/test_custom_business_hour.py b/pandas/tests/tseries/offsets/test_custom_business_hour.py index 0335f415e2ec2..360ed70fa5b9e 100644 --- a/pandas/tests/tseries/offsets/test_custom_business_hour.py +++ b/pandas/tests/tseries/offsets/test_custom_business_hour.py @@ -1,6 +1,7 @@ """ Tests for offsets.CustomBusinessHour """ + from __future__ import annotations from datetime import ( diff --git a/pandas/tests/tseries/offsets/test_custom_business_month.py b/pandas/tests/tseries/offsets/test_custom_business_month.py index b74b210c3b191..fd6565e3908f3 100644 --- a/pandas/tests/tseries/offsets/test_custom_business_month.py +++ b/pandas/tests/tseries/offsets/test_custom_business_month.py @@ -4,6 +4,7 @@ - CustomBusinessMonthBegin - CustomBusinessMonthEnd """ + from __future__ import annotations from datetime import ( diff --git a/pandas/tests/tseries/offsets/test_dst.py b/pandas/tests/tseries/offsets/test_dst.py index a355b947fc540..8ff80536fc69e 100644 --- a/pandas/tests/tseries/offsets/test_dst.py +++ b/pandas/tests/tseries/offsets/test_dst.py @@ -1,6 +1,7 @@ """ Tests for DateOffset additions over Daylight Savings Time """ + from datetime import timedelta import pytest diff --git a/pandas/tests/tseries/offsets/test_easter.py b/pandas/tests/tseries/offsets/test_easter.py index d11a72cc1b9d5..ada72d94434a3 100644 --- a/pandas/tests/tseries/offsets/test_easter.py +++ b/pandas/tests/tseries/offsets/test_easter.py @@ -2,6 +2,7 @@ Tests for the following offsets: - Easter """ + from __future__ import annotations from datetime import datetime diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py index 208f8f550086a..f442b363f737d 100644 --- a/pandas/tests/tseries/offsets/test_fiscal.py +++ b/pandas/tests/tseries/offsets/test_fiscal.py @@ -1,6 +1,7 @@ """ Tests for Fiscal Year and Fiscal Quarter offset 
classes """ + from datetime import datetime from dateutil.relativedelta import relativedelta diff --git a/pandas/tests/tseries/offsets/test_index.py b/pandas/tests/tseries/offsets/test_index.py index 7a62944556d11..4fb9815ba92bb 100644 --- a/pandas/tests/tseries/offsets/test_index.py +++ b/pandas/tests/tseries/offsets/test_index.py @@ -1,6 +1,7 @@ """ Tests for offset behavior with indices. """ + import pytest from pandas import ( diff --git a/pandas/tests/tseries/offsets/test_month.py b/pandas/tests/tseries/offsets/test_month.py index 2b643999c3ad3..4dd494d0872a1 100644 --- a/pandas/tests/tseries/offsets/test_month.py +++ b/pandas/tests/tseries/offsets/test_month.py @@ -5,6 +5,7 @@ - MonthBegin - MonthEnd """ + from __future__ import annotations from datetime import datetime diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index fabffa708687b..1e5bfa6033216 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -1,6 +1,7 @@ """ Tests of pandas.tseries.offsets """ + from __future__ import annotations from datetime import ( diff --git a/pandas/tests/tseries/offsets/test_offsets_properties.py b/pandas/tests/tseries/offsets/test_offsets_properties.py index 1b4fa9292c403..99a6a583dd3e9 100644 --- a/pandas/tests/tseries/offsets/test_offsets_properties.py +++ b/pandas/tests/tseries/offsets/test_offsets_properties.py @@ -7,6 +7,7 @@ You may wish to consult the previous version for inspiration on further tests, or when trying to pin down the bugs exposed by the tests below. """ + from hypothesis import ( assume, given, diff --git a/pandas/tests/tseries/offsets/test_quarter.py b/pandas/tests/tseries/offsets/test_quarter.py index d3872b7ce9537..b92ff9d39a3ca 100644 --- a/pandas/tests/tseries/offsets/test_quarter.py +++ b/pandas/tests/tseries/offsets/test_quarter.py @@ -3,6 +3,7 @@ - QuarterBegin - QuarterEnd """ + from __future__ import annotations from datetime import datetime diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py index 07e434e883c04..c8fbdfa11991a 100644 --- a/pandas/tests/tseries/offsets/test_ticks.py +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -1,6 +1,7 @@ """ Tests for offsets.Tick and subclasses """ + from datetime import ( datetime, timedelta, diff --git a/pandas/tests/tseries/offsets/test_week.py b/pandas/tests/tseries/offsets/test_week.py index f9a8755dc6336..d34a7953121c1 100644 --- a/pandas/tests/tseries/offsets/test_week.py +++ b/pandas/tests/tseries/offsets/test_week.py @@ -4,6 +4,7 @@ - WeekOfMonth - LastWeekOfMonth """ + from __future__ import annotations from datetime import ( diff --git a/pandas/tests/tseries/offsets/test_year.py b/pandas/tests/tseries/offsets/test_year.py index 28cbdcf6abecc..9d2a0b20e1e7c 100644 --- a/pandas/tests/tseries/offsets/test_year.py +++ b/pandas/tests/tseries/offsets/test_year.py @@ -3,6 +3,7 @@ - YearBegin - YearEnd """ + from __future__ import annotations from datetime import datetime diff --git a/pandas/tests/tslibs/test_liboffsets.py b/pandas/tests/tslibs/test_liboffsets.py index 6ffc065bb61cf..f311284b9dc63 100644 --- a/pandas/tests/tslibs/test_liboffsets.py +++ b/pandas/tests/tslibs/test_liboffsets.py @@ -1,6 +1,7 @@ """ Tests for helper functions in the cython tslibs.offsets """ + from datetime import datetime import pytest diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py index 4dd9d7b20be69..d1b0595dd50e6 100644 --- 
a/pandas/tests/tslibs/test_parsing.py +++ b/pandas/tests/tslibs/test_parsing.py @@ -1,6 +1,7 @@ """ Tests for Timestamp parsing, aimed at pandas/_libs/tslibs/parsing.pyx """ + from datetime import datetime import re diff --git a/pandas/tests/util/test_assert_produces_warning.py b/pandas/tests/util/test_assert_produces_warning.py index 88e9f0d8fccee..80e3264690f81 100644 --- a/pandas/tests/util/test_assert_produces_warning.py +++ b/pandas/tests/util/test_assert_produces_warning.py @@ -1,6 +1,7 @@ -"""" +""" " Test module for testing ``pandas._testing.assert_produces_warning``. """ + import warnings import pytest diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 78626781289c4..d4a79cae61772 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -23,6 +23,7 @@ def test_foo(): For more information, refer to the ``pytest`` documentation on ``skipif``. """ + from __future__ import annotations import locale diff --git a/pandas/util/_tester.py b/pandas/util/_tester.py index 7cfddef7ddff8..494f306ec807d 100644 --- a/pandas/util/_tester.py +++ b/pandas/util/_tester.py @@ -1,6 +1,7 @@ """ Entrypoint for testing from the top-level namespace. """ + from __future__ import annotations import os diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index 1c2d6e2d38ab2..9aab19fe340ec 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -2,6 +2,7 @@ Module that contains many useful utilities for validating data or function arguments """ + from __future__ import annotations from collections.abc import ( @@ -341,13 +342,11 @@ def validate_percentile(q: float | Iterable[float]) -> np.ndarray: @overload -def validate_ascending(ascending: BoolishT) -> BoolishT: - ... +def validate_ascending(ascending: BoolishT) -> BoolishT: ... @overload -def validate_ascending(ascending: Sequence[BoolishT]) -> list[BoolishT]: - ... +def validate_ascending(ascending: Sequence[BoolishT]) -> list[BoolishT]: ... def validate_ascending( diff --git a/web/pandas_web.py b/web/pandas_web.py index 8d4f7d311b716..aac07433f2712 100755 --- a/web/pandas_web.py +++ b/web/pandas_web.py @@ -23,6 +23,7 @@ The rest of the items in the file will be added directly to the context. 
""" + import argparse import collections import datetime From 2f779c40a21618b393597a45f7bb0bb13c2913cc Mon Sep 17 00:00:00 2001 From: Anh Trinh Date: Thu, 7 Mar 2024 21:06:10 +0100 Subject: [PATCH 3/4] Ignore conflicted error --- scripts/validate_docstrings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index a4d53d360a12b..876e386b83abd 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -193,7 +193,7 @@ def validate_pep8(self): "flake8", "--format=%(row)d\t%(col)d\t%(code)s\t%(text)s", "--max-line-length=88", - "--ignore=E203,E3,W503,W504,E402,E731,E128,E124", + "--ignore=E203,E3,W503,W504,E402,E731,E128,E124,E704", file.name, ] response = subprocess.run(cmd, capture_output=True, check=False, text=True) From 4ee7b66e371818088ef9c7fa2acf6fe8c3122836 Mon Sep 17 00:00:00 2001 From: Anh Trinh Date: Thu, 7 Mar 2024 22:42:20 +0100 Subject: [PATCH 4/4] Ignore false positive --- pandas/tests/dtypes/test_inference.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 668e7192c0e52..96a67591f6c78 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -240,6 +240,8 @@ def test_is_list_like_generic(): # is_list_like was yielding false positives for Generic classes in python 3.11 T = TypeVar("T") + # https://github.com/pylint-dev/pylint/issues/9398 + # pylint: disable=multiple-statements class MyDataFrame(DataFrame, Generic[T]): ... tstc = MyDataFrame[int]