diff --git a/doc/source/whatsnew/v2.0.2.rst b/doc/source/whatsnew/v2.0.2.rst
index 2b488ecec0b11..f3432564233a1 100644
--- a/doc/source/whatsnew/v2.0.2.rst
+++ b/doc/source/whatsnew/v2.0.2.rst
@@ -30,6 +30,7 @@ Bug fixes
 - Bug in :func:`api.interchange.from_dataframe` was unnecessarily raising on bitmasks (:issue:`49888`)
 - Bug in :func:`merge` when merging on datetime columns on different resolutions (:issue:`53200`)
 - Bug in :meth:`DataFrame.convert_dtypes` ignores ``convert_*`` keywords when set to False ``dtype_backend="pyarrow"`` (:issue:`52872`)
+- Bug in :meth:`DataFrame.sort_values` raising for PyArrow ``dictionary`` dtype (:issue:`53232`)
 - Bug in :meth:`Series.describe` treating pyarrow-backed timestamps and timedeltas as categorical data (:issue:`53001`)
 - Bug in :meth:`Series.rename` not making a lazy copy when Copy-on-Write is enabled when a scalar is passed to it (:issue:`52450`)
 - Bug in :meth:`pd.array` raising for ``NumPy`` array and ``pa.large_string`` or ``pa.large_binary`` (:issue:`52590`)
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index fb56a98fc87cc..611ef142a72a5 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -269,7 +269,10 @@ def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = Fal
             # GH50430: let pyarrow infer type, then cast
             scalars = pa.array(scalars, from_pandas=True)
         if pa_dtype:
-            scalars = scalars.cast(pa_dtype)
+            if pa.types.is_dictionary(pa_dtype):
+                scalars = scalars.dictionary_encode()
+            else:
+                scalars = scalars.cast(pa_dtype)
         arr = cls(scalars)
         if pa.types.is_duration(scalars.type) and scalars.null_count > 0:
             # GH52843: upstream bug for duration types when originally
@@ -868,7 +871,10 @@ def factorize(
         else:
             data = self._data

-        encoded = data.dictionary_encode(null_encoding=null_encoding)
+        if pa.types.is_dictionary(data.type):
+            encoded = data
+        else:
+            encoded = data.dictionary_encode(null_encoding=null_encoding)
         if encoded.length() == 0:
             indices = np.array([], dtype=np.intp)
             uniques = type(self)(pa.chunked_array([], type=encoded.type.value_type))
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index abad641819ed1..fdff9dd873fec 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -1831,6 +1831,20 @@ def test_searchsorted_with_na_raises(data_for_sorting, as_series):
         arr.searchsorted(b)


+def test_sort_values_dictionary():
+    df = pd.DataFrame(
+        {
+            "a": pd.Series(
+                ["x", "y"], dtype=ArrowDtype(pa.dictionary(pa.int32(), pa.string()))
+            ),
+            "b": [1, 2],
+        },
+    )
+    expected = df.copy()
+    result = df.sort_values(by=["a", "b"])
+    tm.assert_frame_equal(result, expected)
+
+
 @pytest.mark.parametrize("pat", ["abc", "a[a-z]{2}"])
 def test_str_count(pat):
     ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
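
A minimal, standalone sketch (not part of the patch) of the two pyarrow building blocks the fix relies on: pa.types.is_dictionary to detect dictionary-typed data, and Array.dictionary_encode() to build a dictionary array directly instead of casting to the dictionary type. The printed type in the comment is illustrative and may vary slightly across pyarrow releases.

    import pyarrow as pa

    # Plain string array, as pyarrow would infer it from Python scalars.
    values = pa.array(["x", "y", "x"], from_pandas=True)

    # Target type analogous to ArrowDtype(pa.dictionary(pa.int32(), pa.string())).
    dict_type = pa.dictionary(pa.int32(), pa.string())

    # The patched _from_sequence branches on the target type: dictionary targets
    # are produced via dictionary_encode() rather than cast().
    if pa.types.is_dictionary(dict_type):
        encoded = values.dictionary_encode()
    else:
        encoded = values.cast(dict_type)

    print(encoded.type)  # e.g. dictionary<values=string, indices=int32, ordered=0>

    # The patched factorize() applies the same check to skip re-encoding data
    # that is already dictionary-encoded.
    assert pa.types.is_dictionary(encoded.type)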