Fix some unit test deprecation warnings (#3503)
adamgreg committed May 26, 2022
1 parent df22b1b commit 4aef113
Showing 5 changed files with 46 additions and 44 deletions.
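
The diff follows two recurring patterns: in the tests, deprecated expression indexing (`df[expr]` and `df[[expr]]`) becomes `df.select(expr)`, and the deprecated `to_parquet`/`to_ipc` writers become `write_parquet`/`write_ipc`; in `frame.py`, the deprecated code paths the tests no longer exercise are marked `# pragma: no cover`. A minimal sketch of the test-side migration (the frame and output path here are illustrative, not from this commit):

```python
import polars as pl

df = pl.DataFrame({"a": [1, 2, 3]})

# Deprecated since 0.13.12: indexing a DataFrame with expressions
# emits a DeprecationWarning during the test run.
# out = df[[pl.count("a")]]

# Preferred: pass the expressions to select().
out = df.select([pl.count("a")])

# Writers were renamed from to_* to write_*.
# df.to_parquet("example.parquet")   # deprecated name
df.write_parquet("example.parquet")  # current name
```
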
28 changes: 15 additions & 13 deletions py-polars/polars/internals/frame.py
@@ -946,7 +946,7 @@ def to_json(
         json_lines: bool = False,
         *,
         to_string: bool = False,
-    ) -> Optional[str]:
+    ) -> Optional[str]:  # pragma: no cover
         """
         .. deprecated:: 0.13.12
             Please use `write_json`
@@ -1131,7 +1131,7 @@ def to_csv(
         file: Optional[Union[TextIO, BytesIO, str, Path]] = None,
         has_header: bool = True,
         sep: str = ",",
-    ) -> Optional[str]:
+    ) -> Optional[str]:  # pragma: no cover
         """
         .. deprecated:: 0.13.12
             Please use `write_csv`
@@ -1168,7 +1168,7 @@ def to_avro(
         self,
         file: Union[BinaryIO, BytesIO, str, Path],
         compression: Literal["uncompressed", "snappy", "deflate"] = "uncompressed",
-    ) -> None:
+    ) -> None:  # pragma: no cover
         """
         .. deprecated:: 0.13.12
             Please use `write_avro`
@@ -1207,7 +1207,7 @@ def to_ipc(
         self,
         file: Union[BinaryIO, BytesIO, str, Path],
         compression: Optional[Literal["uncompressed", "lz4", "zstd"]] = "uncompressed",
-    ) -> None:
+    ) -> None:  # pragma: no cover
         """
         .. deprecated:: 0.13.12
             Please use `write_ipc`
@@ -1436,7 +1436,7 @@ def to_parquet(
         statistics: bool = False,
         use_pyarrow: bool = False,
         **kwargs: Any,
-    ) -> None:
+    ) -> None:  # pragma: no cover
         """
         .. deprecated:: 0.13.12
             Please use `write_parquet`
@@ -1541,7 +1541,7 @@ def __getattr__(self, item: Any) -> "PySeries":
         # See: https://github.com/jupyter/notebook/issues/2014
         if item.startswith("_"):
             raise AttributeError(item)
-        try:
+        try:  # pragma: no cover
             warnings.warn(
                 "accessing series as Attribute of a DataFrame is deprecated",
                 DeprecationWarning,
@@ -1603,7 +1603,7 @@ def __getitem__(
         """
         Does quite a lot. Read the comments.
         """
-        if isinstance(item, pli.Expr):
+        if isinstance(item, pli.Expr):  # pragma: no cover
            warnings.warn(
                "'using expressions in []' is deprecated. please use 'select'",
                DeprecationWarning,
@@ -1764,7 +1764,7 @@ def __getitem__(

     def __setitem__(
         self, key: Union[str, List, Tuple[Any, Union[str, int]]], value: Any
-    ) -> None:
+    ) -> None:  # pragma: no cover
         warnings.warn(
             "setting a DataFrame by indexing is deprecated; Consider using DataFrame.with_column",
             DeprecationWarning,
@@ -2310,15 +2310,15 @@ def sort(
                 .sort(by, reverse, nulls_last)
                 .collect(no_optimization=True, string_cache=False)
             )
-            if in_place:
+            if in_place:  # pragma: no cover
                 warnings.warn(
                     "in-place sorting is deprecated; please use default sorting",
                     DeprecationWarning,
                 )
                 self._df = df._df
                 return self
             return df
-        if in_place:
+        if in_place:  # pragma: no cover
             warnings.warn(
                 "in-place sorting is deprecated; please use default sorting",
                 DeprecationWarning,
@@ -3493,7 +3493,7 @@ def join(
         The keys must be sorted to perform an asof join
         """
-        if how == "asof":
+        if how == "asof":  # pragma: no cover
             warnings.warn(
                 "using asof join via DataFrame.join is deprecated, please use DataFrame.join_asof",
                 DeprecationWarning,
@@ -5493,7 +5493,9 @@ def __getitem__(self, item: Any) -> "GBSelection[DF]":
         )
         return self._select(item)

-    def _select(self, columns: Union[str, List[str]]) -> "GBSelection[DF]":
+    def _select(
+        self, columns: Union[str, List[str]]
+    ) -> "GBSelection[DF]":  # pragma: no cover
         """
         Select the columns that will be aggregated.
@@ -5586,7 +5588,7 @@ def get_group(self, group_value: Union[Any, Tuple[Any]]) -> DF:
         df = self._dataframe_class._from_pydf(self._df)
         return df[groups_idx]

-    def groups(self) -> DF:
+    def groups(self) -> DF:  # pragma: no cover
         """
         Return a `DataFrame` with:
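
The `# pragma: no cover` markers added throughout `frame.py` are coverage.py's standard exclusion comment: a marked line, or the whole block it opens, is omitted from the coverage report, so deprecated branches the updated tests deliberately avoid are not counted as untested. A small sketch of the effect, using a hypothetical function:

```python
import warnings


def old_api() -> None:  # pragma: no cover
    # The pragma on the def line excludes this entire body from
    # coverage measurement, so the tests need not exercise the
    # deprecated path just to keep the coverage number up.
    warnings.warn("old_api is deprecated; use new_api", DeprecationWarning)


def new_api() -> None:
    """The supported path; still measured by coverage."""
```
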
4 changes: 2 additions & 2 deletions py-polars/tests/conftest.py
@@ -48,10 +48,10 @@ def foods_csv() -> str:


 if not os.path.isfile(FOODS_PARQUET):
-    pl.read_csv(FOODS_CSV).to_parquet(FOODS_PARQUET)
+    pl.read_csv(FOODS_CSV).write_parquet(FOODS_PARQUET)

 if not os.path.isfile(FOODS_IPC):
-    pl.read_csv(FOODS_CSV).to_ipc(FOODS_IPC)
+    pl.read_csv(FOODS_CSV).write_ipc(FOODS_IPC)


 @pytest.fixture
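
Only the writers were renamed; the readers keep their `pl.read_*` names, so the fixture files written here are read back unchanged elsewhere in the suite. A round-trip sketch (the frame and paths are illustrative):

```python
import polars as pl

df = pl.DataFrame({"food": ["apple", "bread"], "calories": [52, 265]})

# Renamed writers: to_parquet -> write_parquet, to_ipc -> write_ipc.
df.write_parquet("/tmp/foods.parquet")
df.write_ipc("/tmp/foods.ipc")

# The read side is unaffected by the renames.
assert pl.read_parquet("/tmp/foods.parquet").frame_equal(df)
assert pl.read_ipc("/tmp/foods.ipc").frame_equal(df)
```
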
24 changes: 12 additions & 12 deletions py-polars/tests/test_df.py
@@ -1090,10 +1090,10 @@ def test_column_names() -> None:

 def test_lazy_functions() -> None:
     df = pl.DataFrame({"a": ["foo", "bar", "2"], "b": [1, 2, 3], "c": [1.0, 2.0, 3.0]})
-    out = df[[pl.count("a")]]
+    out = df.select([pl.count("a")])
     assert out["a"] == 3
     assert pl.count(df["a"]) == 3
-    out = df[
+    out = df.select(
         [
             pl.var("b").alias("1"),
             pl.std("b").alias("2"),
@@ -1106,7 +1106,7 @@ def test_lazy_functions() -> None:
             pl.first("b").alias("9"),
             pl.last("b").alias("10"),
         ]
-    ]
+    )
     expected = 1.0
     assert np.isclose(out.select_at_idx(0), expected)
     assert np.isclose(pl.var(df["b"]), expected)  # type: ignore
@@ -1213,11 +1213,11 @@ def test_to_numpy() -> None:


 def test_argsort_by(df: pl.DataFrame) -> None:
-    a = df[pl.argsort_by(["int_nulls", "floats"], reverse=[False, True])]["int_nulls"]
-    assert a == [1, 0, 3]
+    idx_df = df.select(pl.argsort_by(["int_nulls", "floats"], reverse=[False, True]))
+    assert idx_df["int_nulls"] == [1, 0, 3]

-    a = df[pl.argsort_by(["int_nulls", "floats"], reverse=False)]["int_nulls"]
-    assert a == [1, 0, 2]
+    idx_df = df.select(pl.argsort_by(["int_nulls", "floats"], reverse=False))
+    assert idx_df["int_nulls"] == [1, 0, 2]


 def test_literal_series() -> None:
@@ -1293,7 +1293,7 @@ def test_from_rows() -> None:
 def test_repeat_by() -> None:
     df = pl.DataFrame({"name": ["foo", "bar"], "n": [2, 3]})

-    out = df[pl.col("n").repeat_by("n")]
+    out = df.select(pl.col("n").repeat_by("n"))
     s = out["n"]
     assert s[0] == [2, 2]
     assert s[1] == [3, 3, 3]
@@ -1356,14 +1356,14 @@ def dot_product() -> None:
     df = pl.DataFrame({"a": [1, 2, 3, 4], "b": [2, 2, 2, 2]})

     assert df["a"].dot(df["b"]) == 20
-    assert df[[pl.col("a").dot("b")]][0, "a"] == 20
+    assert df.select([pl.col("a").dot("b")])[0, "a"] == 20


 def test_hash_rows() -> None:
     df = pl.DataFrame({"a": [1, 2, 3, 4], "b": [2, 2, 2, 2]})
     assert df.hash_rows().dtype == pl.UInt64
     assert df["a"].hash().dtype == pl.UInt64
-    assert df[[pl.col("a").hash().alias("foo")]]["foo"].dtype == pl.UInt64
+    assert df.select([pl.col("a").hash().alias("foo")])["foo"].dtype == pl.UInt64


 def test_create_df_from_object() -> None:
@@ -1857,7 +1857,7 @@ def test_get_item() -> None:
     df = pl.DataFrame({"a": [1.0, 2.0], "b": [3, 4]})

     # expression
-    assert df[pl.col("a")].frame_equal(pl.DataFrame({"a": [1.0, 2.0]}))
+    assert df.select(pl.col("a")).frame_equal(pl.DataFrame({"a": [1.0, 2.0]}))

     # numpy array
     assert df[np.array([True, False])].frame_equal(pl.DataFrame({"a": [1.0], "b": [3]}))
@@ -1890,7 +1890,7 @@ def test_get_item() -> None:
     # if bools, assumed to be a row mask
     # if integers, assumed to be row indices
     assert df[["a", "b"]].frame_equal(df)
-    assert df[[pl.col("a"), pl.col("b")]].frame_equal(df)
+    assert df.select([pl.col("a"), pl.col("b")]).frame_equal(df)
     df[[1]].frame_equal(pl.DataFrame({"a": [1.0], "b": [3]}))
     df[[False, True]].frame_equal(pl.DataFrame({"a": [1.0], "b": [3]}))
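
A detail worth noting in these rewrites: `select()` always returns a DataFrame, even for a single expression, which is why the updated tests index the result (`df.select(...)["a"]`) or position into it (`[0, "a"]`) where the old `df[expr]` form was used. A short sketch (the column and expression are illustrative):

```python
import polars as pl

df = pl.DataFrame({"n": [2, 3]})

# select() returns a DataFrame, so the tests pull the column back out ...
s = df.select(pl.col("n") * 2)["n"]

# ... or, for a single-column result, convert it with to_series().
assert s.series_equal(df.select(pl.col("n") * 2).to_series())
```
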
24 changes: 12 additions & 12 deletions py-polars/tests/test_lazy.py
@@ -222,34 +222,34 @@ def test_arange() -> None:

 def test_arg_unique() -> None:
     df = pl.DataFrame({"a": [4, 1, 4]})
-    col_a_unique = df[col("a").arg_unique()]["a"]
+    col_a_unique = df.select(col("a").arg_unique())["a"]
     assert col_a_unique.series_equal(pl.Series("a", [0, 1]).cast(pl.UInt32))


 def test_is_unique() -> None:
     df = pl.DataFrame({"a": [4, 1, 4]})
-    assert df[col("a").is_unique()]["a"].series_equal(
+    assert df.select(col("a").is_unique())["a"].series_equal(
         pl.Series("a", [False, True, False])
     )


 def test_is_first() -> None:
     df = pl.DataFrame({"a": [4, 1, 4]})
-    assert df[col("a").is_first()]["a"].series_equal(
+    assert df.select(col("a").is_first())["a"].series_equal(
         pl.Series("a", [True, True, False])
     )


 def test_is_duplicated() -> None:
     df = pl.DataFrame({"a": [4, 1, 4]})
-    assert df[col("a").is_duplicated()]["a"].series_equal(
+    assert df.select(col("a").is_duplicated())["a"].series_equal(
         pl.Series("a", [True, False, True])
     )


 def test_arg_sort() -> None:
     df = pl.DataFrame({"a": [4, 1, 3]})
-    assert df[col("a").arg_sort()]["a"] == [1, 2, 0]
+    assert df.select(col("a").arg_sort())["a"] == [1, 2, 0]


 def test_window_function() -> None:
@@ -272,20 +272,20 @@ def test_window_function() -> None:
     out = q.collect()
     assert out["cars_max_B"] == [5, 4, 5, 5, 5]

-    out = df[[pl.first("B").over(["fruits", "cars"]).alias("B_first")]]
+    out = df.select([pl.first("B").over(["fruits", "cars"]).alias("B_first")])
     assert out["B_first"] == [5, 4, 3, 3, 5]


 def test_when_then_flatten() -> None:
     df = pl.DataFrame({"foo": [1, 2, 3], "bar": [3, 4, 5]})

-    assert df[
+    assert df.select(
         when(col("foo") > 1)
         .then(col("bar"))
         .when(col("bar") < 3)
         .then(10)
         .otherwise(30)
-    ]["bar"] == [30, 4, 5]
+    )["bar"] == [30, 4, 5]


 def test_describe_plan() -> None:
@@ -333,7 +333,7 @@ def test_window_deadlock() -> None:
 def test_concat_str() -> None:
     df = pl.DataFrame({"a": ["a", "b", "c"], "b": [1, 2, 3]})

-    out = df[[pl.concat_str(["a", "b"], sep="-")]]
+    out = df.select([pl.concat_str(["a", "b"], sep="-")])
     assert out["a"] == ["a-1", "b-2", "c-3"]

     out = df.select([pl.format("foo_{}_bar_{}", pl.col("a"), "b").alias("fmt")])
@@ -501,7 +501,7 @@ def test_drop_nulls() -> None:

 def test_all_expr() -> None:
     df = pl.DataFrame({"nrs": [1, 2, 3, 4, 5, None]})
-    assert df[[pl.all()]].frame_equal(df)
+    assert df.select([pl.all()]).frame_equal(df)


 def test_any_expr(fruits_cars: pl.DataFrame) -> None:
@@ -867,10 +867,10 @@ def test_expr_bool_cmp() -> None:
     df = pl.DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 2, 3, 4, 5]})

     with pytest.raises(ValueError):
-        df[[pl.col("a").gt(pl.col("b")) and pl.col("b").gt(pl.col("b"))]]
+        df.select([pl.col("a").gt(pl.col("b")) and pl.col("b").gt(pl.col("b"))])

     with pytest.raises(ValueError):
-        df[[pl.col("a").gt(pl.col("b")) or pl.col("b").gt(pl.col("b"))]]
+        df.select([pl.col("a").gt(pl.col("b")) or pl.col("b").gt(pl.col("b"))])


 def test_is_in() -> None:
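
`test_expr_bool_cmp` makes sense once the mechanism is clear: Python's `and`/`or` call `bool()` on the left operand, and a polars expression refuses to collapse to a single truth value, so a `ValueError` is raised whether the expression goes through `[]` or `select()`. Element-wise logic uses the `&`/`|` operators instead; a short sketch (the data is illustrative):

```python
import polars as pl

df = pl.DataFrame({"a": [1, 2, 3], "b": [3, 2, 1]})

# `and` forces bool(<Expr>), which polars rejects with a ValueError:
# df.select([(pl.col("a") > pl.col("b")) and (pl.col("b") > 0)])

# Element-wise combination uses & and | (parenthesize the comparisons,
# since & binds more tightly than >):
out = df.select([((pl.col("a") > pl.col("b")) & (pl.col("b") > 0)).alias("both")])
```
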
10 changes: 5 additions & 5 deletions py-polars/tests/test_series.py
@@ -631,19 +631,19 @@ def test_is_in() -> None:
     assert out == [True, True, False]
     df = pl.DataFrame({"a": [1.0, 2.0], "b": [1, 4]})

-    assert df[pl.col("a").is_in(pl.col("b")).alias("mask")]["mask"] == [True, False]
+    assert df.select(pl.col("a").is_in(pl.col("b"))).to_series() == [True, False]


 def test_str_slice() -> None:
     df = pl.DataFrame({"a": ["foobar", "barfoo"]})
     assert df["a"].str.slice(-3) == ["bar", "foo"]

-    assert df[[pl.col("a").str.slice(2, 4)]]["a"] == ["obar", "rfoo"]
+    assert df.select([pl.col("a").str.slice(2, 4)])["a"] == ["obar", "rfoo"]


 def test_arange_expr() -> None:
     df = pl.DataFrame({"a": ["foobar", "barfoo"]})
-    out = df[[pl.arange(0, pl.col("a").count() * 10)]]
+    out = df.select([pl.arange(0, pl.col("a").count() * 10)])
     assert out.shape == (20, 1)
     assert out.select_at_idx(0)[-1] == 19

@@ -679,14 +679,14 @@ def test_reinterpret() -> None:
     s = pl.Series("a", [1, 1, 2], dtype=pl.UInt64)
     assert s.reinterpret(signed=True).dtype == pl.Int64
     df = pl.DataFrame([s])
-    assert df[[pl.col("a").reinterpret(signed=True)]]["a"].dtype == pl.Int64
+    assert df.select([pl.col("a").reinterpret(signed=True)])["a"].dtype == pl.Int64


 def test_mode() -> None:
     s = pl.Series("a", [1, 1, 2])
     assert s.mode() == [1]
     df = pl.DataFrame([s])
-    assert df[[pl.col("a").mode()]]["a"] == [1]
+    assert df.select([pl.col("a").mode()])["a"] == [1]


 def test_jsonpath_single() -> None:
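
One way to keep warnings like these from creeping back, an assumption about workflow rather than anything in this commit, is to escalate `DeprecationWarning` to an error in the test suite, for example with pytest's `filterwarnings` marker:

```python
import polars as pl
import pytest


# Hypothetical test, not from this commit: the marker turns any
# DeprecationWarning raised inside the test into a failure.
@pytest.mark.filterwarnings("error::DeprecationWarning")
def test_no_deprecated_indexing() -> None:
    df = pl.DataFrame({"a": [1.0, 2.0]})
    # df[pl.col("a")] would fail here; select() stays quiet.
    assert df.select(pl.col("a")).frame_equal(pl.DataFrame({"a": [1.0, 2.0]}))
```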