From 0be656c5d5c2ec9cb22a83582acc23dba90b2e85 Mon Sep 17 00:00:00 2001
From: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
Date: Wed, 3 Apr 2024 17:11:03 -0500
Subject: [PATCH 1/2] Remove skips for named aggregations

---
 dask/dataframe/tests/test_groupby.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/dask/dataframe/tests/test_groupby.py b/dask/dataframe/tests/test_groupby.py
index b08bd5ae480..c344e63a1fb 100644
--- a/dask/dataframe/tests/test_groupby.py
+++ b/dask/dataframe/tests/test_groupby.py
@@ -3357,7 +3357,6 @@ def test_groupby_numeric_only_None_column_name():
     ddf.groupby(lambda x: x).mean(numeric_only=False)
 
 
-@pytest.mark.skipif(DASK_EXPR_ENABLED, reason="Aggregation not supported")
 @pytest.mark.skipif(not PANDAS_GE_140, reason="requires pandas >= 1.4.0")
 @pytest.mark.parametrize("shuffle_method", [True, False])
 def test_dataframe_named_agg(shuffle_method):
@@ -3382,7 +3381,6 @@ def test_dataframe_named_agg(shuffle_method):
     assert_eq(expected, actual)
 
 
-@pytest.mark.skipif(DASK_EXPR_ENABLED, reason="Aggregation not supported")
 @pytest.mark.skipif(not PANDAS_GE_140, reason="requires pandas >= 1.4.0")
 @pytest.mark.parametrize("shuffle_method", [True, False])
 @pytest.mark.parametrize("agg", ["count", "mean", partial(np.var, ddof=1)])

From ebb9e2a294fe888ff5fd3b7d7b2e03ba234cf9cf Mon Sep 17 00:00:00 2001
From: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
Date: Wed, 3 Apr 2024 17:12:36 -0500
Subject: [PATCH 2/2] Remove another skip

---
 dask/dataframe/tests/test_dataframe.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/dask/dataframe/tests/test_dataframe.py b/dask/dataframe/tests/test_dataframe.py
index 4a09817ff9a..7c560042f81 100644
--- a/dask/dataframe/tests/test_dataframe.py
+++ b/dask/dataframe/tests/test_dataframe.py
@@ -4455,9 +4455,6 @@ def test_columns_assignment():
     assert_eq(df, ddf)
 
 
-@pytest.mark.skipif(
-    DASK_EXPR_ENABLED, reason="Can't make this work with dynamic access"
-)
 def test_attribute_assignment():
     df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": [1.0, 2.0, 3.0, 4.0, 5.0]})
     ddf = dd.from_pandas(df, npartitions=2)
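
Note (not part of the patches): the hunks above show only the decorators, not the full
test bodies, so the following is a minimal illustrative sketch of the behaviour these
re-enabled tests exercise under the dask-expr backend — pandas-1.4-style named
aggregation and column assignment via attribute access. The sample data and printed
output are made up for illustration and are not taken from the test suite.

    # Illustrative sketch; assumes a recent dask with the dask-expr backend.
    import pandas as pd
    import dask.dataframe as dd

    df = pd.DataFrame({"g": ["a", "a", "b"], "x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
    ddf = dd.from_pandas(df, npartitions=2)

    # Named aggregation (covered by test_dataframe_named_agg and the second
    # groupby test whose name the hunk truncates): new_col=("col", func) syntax.
    named = ddf.groupby("g").agg(x_sum=("x", "sum"), y_mean=("y", "mean"))
    print(named.compute())

    # Attribute assignment (covered by test_attribute_assignment): assigning to
    # an existing column through attribute access, previously skipped under
    # dask-expr because of the dynamic attribute handling mentioned in the skip.
    ddf.y = ddf.x + ddf.y
    print(ddf.compute())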