Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

BUG: addtl fix for compat summary of groupby/resample with dicts #12329

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion doc/source/whatsnew/v0.18.0.txt
Original file line number Diff line number Diff line change
Expand Up @@ -581,7 +581,7 @@ other anchored offsets like ``MonthBegin`` and ``YearBegin``.
Resample API
^^^^^^^^^^^^

Like the change in the window functions API :ref:`above <whatsnew_0180.enhancements.moments>`, ``.resample(...)`` is changing to have a more groupby-like API. (:issue:`11732`, :issue:`12702`, :issue:`12202`).
Like the change in the window functions API :ref:`above <whatsnew_0180.enhancements.moments>`, ``.resample(...)`` is changing to have a more groupby-like API. (:issue:`11732`, :issue:`12702`, :issue:`12202`, :issue:`12332`).

.. ipython:: python

Expand Down
23 changes: 14 additions & 9 deletions pandas/core/groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -2526,7 +2526,8 @@ def aggregate(self, func_or_funcs, *args, **kwargs):
return getattr(self, func_or_funcs)(*args, **kwargs)

if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs, _level)
ret = self._aggregate_multiple_funcs(func_or_funcs,
(_level or 0) + 1)
else:
cyfunc = self._is_cython_func(func_or_funcs)
if cyfunc and not args and not kwargs:
Expand All @@ -2546,6 +2547,18 @@ def aggregate(self, func_or_funcs, *args, **kwargs):
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')

# _level handled at higher
if not _level and isinstance(ret, dict):
from pandas import concat

# our result is a Series-like
if len(ret) == 1:
ret = concat([r for r in ret.values()],
axis=1)

# our result is a DataFrame like
else:
ret = concat(ret, axis=1)
return ret

agg = aggregate
Expand All @@ -2571,14 +2584,6 @@ def _aggregate_multiple_funcs(self, arg, _level):
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)

# for a ndim=1, disallow a nested dict for an aggregator as
# this is a mis-specification of the aggregations, via a
# specificiation error
# e.g. g['A'].agg({'A': ..., 'B': ...})
if self.name in columns and len(columns) > 1:
raise SpecificationError('invalid aggregation names specified '
'for selected objects')

results = {}
for name, func in arg:
obj = self
Expand Down
7 changes: 7 additions & 0 deletions pandas/tests/test_groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -1558,6 +1558,13 @@ def f():
'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
assert_frame_equal(result, expected, check_like=True)

# same name as the original column
# GH9052
expected = g['D'].agg({'result1': np.sum, 'result2': np.mean})
expected = expected.rename(columns={'result1': 'D'})
result = g['D'].agg({'D': np.sum, 'result2': np.mean})
assert_frame_equal(result, expected, check_like=True)

def test_multi_iter(self):
s = Series(np.arange(6))
k1 = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
Expand Down
2 changes: 1 addition & 1 deletion pandas/tseries/resample.py
Original file line number Diff line number Diff line change
Expand Up @@ -500,7 +500,7 @@ def _downsample(self, how, **kwargs):
# do we have a regular frequency
if ax.freq is not None or ax.inferred_freq is not None:

if len(self.grouper.binlabels) > len(ax):
if len(self.grouper.binlabels) > len(ax) and how is None:

# let's do an asfreq
return self.asfreq()
Expand Down
44 changes: 32 additions & 12 deletions pandas/tseries/tests/test_resample.py
Original file line number Diff line number Diff line change
Expand Up @@ -419,25 +419,32 @@ def test_agg_misc(self):
assert_frame_equal(result, expected, check_like=True)

# series like aggs
expected = pd.concat([t['A'].sum(),
t['A'].std()],
axis=1)
expected.columns = ['sum', 'std']

for t in [r, g]:
result = r['A'].agg({'A': ['sum', 'std']})
result = t['A'].agg({'A': ['sum', 'std']})
expected = pd.concat([t['A'].sum(),
t['A'].std()],
axis=1)
expected.columns = ['sum', 'std']

assert_frame_equal(result, expected, check_like=True)

expected = pd.concat([t['A'].agg(['sum', 'std']),
t['A'].agg(['mean', 'std'])],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
('A', 'std'),
('B', 'mean'),
('B', 'std')])
result = t['A'].agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't know if this is new in this PR (or if it already worked in master), but I don't think it is needed that we allow this? (in any case it errored in 0.17.1, so we can make a choice here)
Nested dicts don't seem to make much sense for SeriesGroupBy.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

And if we do this, shouldn't examples below give a MultiIndex?


In [15]: r['A'].agg({'A': ['sum', 'std']})
Out[15]:
                          sum       std
2010-01-01 09:00:00  0.629247  0.174096
2010-01-01 09:00:02  1.056440  0.356036
2010-01-01 09:00:04  1.315957  0.424492
2010-01-01 09:00:06  1.053714  0.221474
2010-01-01 09:00:08  1.275910  0.190737

In [16]: r['A'].agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
Out[16]:
                            A                   B
                          sum       std      mean       std
2010-01-01 09:00:00  0.629247  0.174096  0.314624  0.174096
2010-01-01 09:00:02  1.056440  0.356036  0.528220  0.356036
2010-01-01 09:00:04  1.315957  0.424492  0.657978  0.424492
2010-01-01 09:00:06  1.053714  0.221474  0.526857  0.221474
2010-01-01 09:00:08  1.275910  0.190737  0.637955  0.190737

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

it's not a nested dict, it's exactly like the example that @xflr6 gave, that was my comment. The A acts on the actual data, while the B acts on the SAME data (and NOT B), and is just 'named' B

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In [4]: g['D'].agg({'D': np.sum, 'result2': np.mean})
Out[4]: 
          result2   D
A   B                
bar one  1.000000   1
    two  4.000000   8
foo one  3.000000   6
    two  4.333333  13

In [5]: g['D'].agg({'D': np.sum, 'C': np.mean})
Out[5]: 
                C   D
A   B                
bar one  1.000000   1
    two  4.000000   8
foo one  3.000000   6
    two  4.333333  13

(groupby and resample work the same).

That was my point though. maybe we should raise here as the user could think they are actually operating on something they are not.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In [7]: g.agg({'D': np.sum, 'C': np.mean})
Out[7]: 
                C   D
A   B                
bar one  1.323675   1
    two  1.946188   8
foo one  0.934114   6
    two  1.871956  13

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I had it doing like [4] at one point in time (whether you have a 'single' result or multiple) as I agree it's more consistent. But it's ok where it is now, so I don't think it should change unless it's really compelling. It's clear by what you are passing what you should get back.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's just a bit strange and inconsistent that the key of the dict ('C') in [5] is completely ignored, while in [4] the keys of the dict are used as column names, while only the length of the dict differs

maybe we should raise here as the user could think they are actually operating on something they are not.

I would be +1 to raise here, but in the way it was in 0.17.1: raising for g['D'].agg({'C': ['sum', 'std']}) but not for g['D'].agg({'C': 'sum', 'D': 'std'})

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In [4]: g['D'].agg({'C': ['sum', 'std']})
Out[4]: 
         sum       std
A   B                 
bar one    1       NaN
    two    8  1.414214
foo one    6  4.242641
    two   13  2.516611

In [5]: g['D'].agg({'C': 'sum', 'D' : 'std'})
Out[5]: 
          C         D
A   B                
bar one   1       NaN
    two   8  1.414214
foo one   6  4.242641
    two  13  2.516611

in 0.17.1

In [3]: g['D'].agg({'C': ['sum', 'std']})
ValueError: If using all scalar values, you must pass an index

In [4]: g['D'].agg({'C': 'sum', 'D' : 'std'})
Out[4]: 
          C         D
A   B                
bar one   1       NaN
    two   8  1.414214
foo one   6  4.242641
    two  13  2.516611

I think [3] for 0.17.1 was just a bug. It should have worked.

Looking at this again, this is very tricky. When should you raise? e.g. just because the key of the dict is not in the columns isn't good enough (or even if it's a column in the top-level frame); it HAS to be a renaming key.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ahh, but you would be ok with the current if [4] (from 0.18.0) has a multi-level index, right? (e.g. ('C','sum'), ('C','std'))

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ahh, but you would be ok with the current if [4] (from 0.18.0) has a multi-level index, right? (e.g. ('C','sum'), ('C','std'))

yes (so key of dict is always seen as renaming key, independent of length of dict)

When should you raise? e.g. just because the key of the dict is not in the columns isn't good enough (or even if it's a column in the top-level frame); it HAS to be a renaming key.

If we would want to raise, I think it should not depend on the value of the key (if it is equal to the column name or not), but on the dimension of the values (scalar -> then it is a simple renaming {'my_col_name': my_func} which is OK; when length > 1 -> then it can raise). But indeed probably easier to just allow it, but to be consistent in the handling of the keys as the column names in the multi-index

assert_frame_equal(result, expected, check_like=True)

# errors
# invalid names in the agg specification
for t in [r, g]:

# invalid names in the agg specification
def f():
r['A'].agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
self.assertRaises(SpecificationError, f)

def f():
r[['A']].agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
r[['A']].agg({'A': ['sum', 'std'],
'B': ['mean', 'std']})
self.assertRaises(SpecificationError, f)

def test_agg_nested_dicts(self):
Expand Down Expand Up @@ -918,6 +925,19 @@ def test_resample_ohlc(self):
self.assertEqual(xs['low'], s[:5].min())
self.assertEqual(xs['close'], s[4])

def test_resample_ohlc_result(self):

# GH 12332
index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
index = index.union(pd.date_range('4-15-2000', '5-15-2000', freq='h'))
s = Series(range(len(index)), index=index)

a = s.loc[:'4-15-2000'].resample('30T').ohlc()
self.assertIsInstance(a, DataFrame)

b = s.loc[:'4-14-2000'].resample('30T').ohlc()
self.assertIsInstance(b, DataFrame)

def test_resample_ohlc_dataframe(self):
df = (
pd.DataFrame({
Expand Down