Preserve EA dtype in DataFrame.stack #23285

Merged Nov 8, 2018 · 16 commits
Changes from 11 commits
2 changes: 1 addition & 1 deletion doc/source/whatsnew/v0.24.0.txt
@@ -849,7 +849,7 @@ update the ``ExtensionDtype._metadata`` tuple to match the signature of your
- Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`)
- :func:`ExtensionArray.isna` is allowed to return an ``ExtensionArray`` (:issue:`22325`).
- Support for reduction operations such as ``sum``, ``mean`` via opt-in base class method override (:issue:`22762`)
- :meth:`Series.unstack` no longer converts extension arrays to object-dtype ndarrays. The output ``DataFrame`` will now have the same dtype as the input. This changes behavior for Categorical and Sparse data (:issue:`23077`).
Contributor Author commented:
This was accidentally added in the PeriodArray PR. Will be implemented for good in #23284

- :meth:`DataFrame.stack` no longer converts to object dtype for DataFrames where each column has the same extension dtype. The output Series will have the same dtype as the columns (:issue:`23077`).

.. _whatsnew_0240.api.incompatibilities:

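As a quick illustration of the new `DataFrame.stack` behavior described in the whatsnew entry above — a sketch against this branch, with the expected dtype mirroring `test_stack_preserve_categorical_dtype_values` added below:

In [1]: import pandas as pd

In [2]: cat = pd.Categorical(['a', 'a', 'b', 'c'])

In [3]: df = pd.DataFrame({"A": cat, "B": cat})

In [4]: df.stack().dtype  # previously object; the extension dtype is now preserved
Out[4]: CategoricalDtype(categories=['a', 'b', 'c'], ordered=False)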
6 changes: 5 additions & 1 deletion pandas/core/internals/blocks.py
@@ -36,6 +36,7 @@
is_list_like,
is_re,
is_re_compilable,
is_sparse,
pandas_dtype)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
@@ -632,7 +633,10 @@ def _astype(self, dtype, copy=False, errors='raise', values=None,
return self

if klass is None:
if dtype == np.object_:
if is_sparse(self.values):
# Series[Sparse].astype(object) is sparse.
klass = ExtensionBlock
elif is_object_dtype(dtype):
klass = ObjectBlock
elif is_extension_array_dtype(dtype):
Contributor commented:
so maybe we should just move the is_extension_array_dtype check up to first, and add an is_extension_dtype(self.values) test as well; that should encompass your is_sparse check and is more general.

Contributor Author commented:
I'll make that change and run the test suite.

I was kinda worried about "false positives" here, but I suppose it's exactly what we want if an extension array claims it's object dtype.

Contributor Author commented:
As posted in the unstack PR, we need to special-case Sparse here, since it's the only (internal) extension type that has special .astype(object) behavior.

klass = ExtensionBlock
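To make the Sparse special case above concrete, a sketch using the public API (mirroring `test_astype_object` added below; GH-23125 may change this behavior):

In [1]: import pandas as pd

In [2]: df = pd.DataFrame({"A": pd.SparseArray([0, 1])})

In [3]: df.astype(object).dtypes  # stays sparse, unlike other extension types
Out[3]:
A    Sparse[object, 0]
dtype: object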
62 changes: 57 additions & 5 deletions pandas/core/reshape/reshape.py
@@ -465,8 +465,9 @@ def factorize(index):
if is_extension_array_dtype(dtype):
arr = dtype.construct_array_type()
new_values = arr._concat_same_type([
col for _, col in frame.iteritems()
col._values for _, col in frame.iteritems()
])
new_values = _reorder_for_extension_array_stack(new_values, N, K)
else:
# homogeneous, non-EA
new_values = frame.values.ravel()
@@ -595,16 +596,32 @@ def _convert_level_number(level_num, columns):
slice_len = loc.stop - loc.start

if slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk = this[this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.loc[:, this.columns[loc]].values
if (frame._is_homogeneous_type and
is_extension_array_dtype(frame.dtypes.iloc[0])):
dtype = this[this.columns[loc]].dtypes.iloc[0]
subset = this[this.columns[loc]]

value_slice = dtype.construct_array_type()._concat_same_type(
[x._values for _, x in subset.iteritems()]
)
N, K = this.shape
idx = np.arange(N * K).reshape(K, N).T.ravel()
value_slice = value_slice.take(idx)

elif frame._is_mixed_type:
value_slice = this[this.columns[loc]].values
else:
value_slice = this.values[:, loc]

new_data[key] = value_slice.ravel()
if value_slice.ndim > 1:
# i.e. not extension
value_slice = value_slice.ravel()

new_data[key] = value_slice

if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
@@ -942,3 +959,38 @@ def make_axis_dummies(frame, axis='minor', transform=None):
values = values.take(labels, axis=0)

return DataFrame(values, columns=items, index=frame.index)


def _reorder_for_extension_array_stack(arr, n_rows, n_columns):
"""
Re-orders the values when stacking multiple extension-arrays.

The indirect stacking method used for EAs requires a followup
take to get the order correct.

Parameters
----------
arr : ExtensionArray
n_rows, n_columns : int
The number of rows and columns in the original DataFrame.

Returns
-------
taken : ExtensionArray
The original `arr` with elements re-ordered appropriately

Examples
--------
>>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])
>>> _reorder_for_extension_array_stack(arr, 2, 3)
array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')

>>> _reorder_for_extension_array_stack(arr, 3, 2)
array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
"""
# final take to get the order correct.
# idx is an indexer like
# [c0r0, c1r0, c2r0, ...,
# c0r1, c1r1, c2r1, ...]
idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()
return arr.take(idx)
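For context on the helper above: `_concat_same_type` lays the columns out end-to-end (column-major order), while `stack` needs row-major order, hence the final `take`. A plain-NumPy sketch of the same indexer for a 2-row, 3-column frame (the names here are illustrative only):

In [1]: import numpy as np

In [2]: concatenated = np.array(['r0c0', 'r1c0', 'r0c1', 'r1c1', 'r0c2', 'r1c2'])  # column 0, then column 1, then column 2

In [3]: n_rows, n_columns = 2, 3

In [4]: np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()
Out[4]: array([0, 2, 4, 1, 3, 5])

In [5]: concatenated.take([0, 2, 4, 1, 3, 5])  # row 0 across all columns, then row 1
Out[5]: array(['r0c0', 'r0c1', 'r0c2', 'r1c0', 'r1c1', 'r1c2'], dtype='<U4')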
22 changes: 22 additions & 0 deletions pandas/tests/extension/base/reshaping.py
@@ -170,3 +170,25 @@ def test_merge(self, data, na_value):
[data[0], data[0], data[1], data[2], na_value],
dtype=data.dtype)})
self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']])

@pytest.mark.parametrize("columns", [
["A", "B"],
pd.MultiIndex.from_tuples([('A', 'a'), ('A', 'b')],
names=['outer', 'inner']),
])
def test_stack(self, data, columns):
df = pd.DataFrame({"A": data[:5], "B": data[:5]})
df.columns = columns
result = df.stack()
expected = df.astype(object).stack()
# we need a second astype(object), in case the constructor inferred
# object -> specialized, as is done for period.
expected = expected.astype(object)
Contributor Author commented:
This is kinda strange. For DataFrame[ndarray[object]].stack() of all periods, we actually infer period-dtype. Do we want that, or should we explicitly pass dtype=object when creating the new series / frame to ensure that we don't infer the "correct" dtype?

In [1]: import pandas as pd

In [2]: a = pd.core.arrays.period_array(['2000', '2001'], freq='D')

In [3]: pd.DataFrame({"A": a, "B": a}).astype(object).dtypes
Out[3]:
A    object
B    object
dtype: object

In [4]: pd.DataFrame({"A": a, "B": a}).astype(object).stack().dtype
Out[4]: period[D]

(that's on master)


if isinstance(expected, pd.Series):
assert result.dtype == df.iloc[:, 0].dtype
else:
assert all(result.dtypes == df.iloc[:, 0].dtype)

result = result.astype(object)
self.assert_equal(result, expected)
9 changes: 8 additions & 1 deletion pandas/tests/extension/json/test_json.py
@@ -139,7 +139,14 @@ def test_from_dtype(self, data):


class TestReshaping(BaseJSON, base.BaseReshapingTests):
pass

@pytest.mark.skip(reason="Different definitions of NA")
def test_stack(self):
"""
The test does .astype(object).stack(). If we happen to have
any missing values in `data`, then we'll end up with different
rows since we consider `{}` NA, but `.astype(object)` doesn't.
"""


class TestGetitem(BaseJSON, base.BaseGetitemTests):
11 changes: 11 additions & 0 deletions pandas/tests/frame/test_reshape.py
@@ -874,6 +874,17 @@ def test_stack_preserve_categorical_dtype(self, ordered, labels):

tm.assert_series_equal(result, expected)

def test_stack_preserve_categorical_dtype_values(self):
# GH-23077
cat = pd.Categorical(['a', 'a', 'b', 'c'])
df = pd.DataFrame({"A": cat, "B": cat})
result = df.stack()
index = pd.MultiIndex.from_product([[0, 1, 2, 3], ['A', 'B']])
expected = pd.Series(pd.Categorical(['a', 'a', 'a', 'a',
'b', 'b', 'c', 'c']),
index=index)
tm.assert_series_equal(result, expected)

@pytest.mark.parametrize("level", [0, 'baz'])
def test_unstack_swaplevel_sortlevel(self, level):
# GH 20994
10 changes: 10 additions & 0 deletions pandas/tests/sparse/frame/test_frame.py
@@ -736,6 +736,16 @@ def test_astype_bool(self):
assert res['A'].dtype == SparseDtype(np.bool)
assert res['B'].dtype == SparseDtype(np.bool)

def test_astype_object(self):
# This may change in GH-23125
df = pd.DataFrame({"A": SparseArray([0, 1]),
"B": SparseArray([0, 1])})
result = df.astype(object)
dtype = SparseDtype(object, 0)
expected = pd.DataFrame({"A": SparseArray([0, 1], dtype=dtype),
"B": SparseArray([0, 1], dtype=dtype)})
tm.assert_frame_equal(result, expected)

def test_fillna(self, float_frame_fill0, float_frame_fill0_dense):
df = float_frame_fill0.reindex(lrange(5))
dense = float_frame_fill0_dense.reindex(lrange(5))