
Fix 'observed' kwarg not doing anything on SeriesGroupBy #26463

Merged (24 commits, May 30, 2019)
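
For context, a minimal sketch of the behaviour this PR addresses, distilled from the tests added below (GH 24880); the outputs shown in the comments are approximate and assume a pandas build that includes this fix:

import pandas as pd

# With categorical groupers, SeriesGroupBy previously ignored `observed`,
# so the result only ever contained observed category combinations.
# With this fix, the default observed=False re-expands the result to the
# full cartesian product of the categories.
df = pd.DataFrame({'a': ['x', 'x', 'x', 'y'],
                   'b': ['a', 'a', 'b', 'a'],
                   'c': [1, 2, 3, 4]})
df['a'] = df['a'].astype('category')
df['b'] = df['b'].astype('category')

df.groupby(['a', 'b'], observed=False).c.agg(sum)
# a  b
# x  a    3.0
#    b    3.0
# y  a    4.0
#    b    NaN   <- unobserved combination now gets a slot
# Name: c, dtype: float64

df.groupby(['a', 'b'], observed=True).c.agg(sum)
# a  b
# x  a    3
#    b    3
# y  a    4
# Name: c, dtype: int64
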
Changes from 4 commits

Commits (24):
a5d6d1a
Fix 'observed' kwarg not doing anything on SeriesGroupBy
krsnik93 May 19, 2019
41f49f4
Merge branch 'GH24880'
krsnik93 May 19, 2019
2575c41
Wrap long lines
krsnik93 May 19, 2019
1c02d9f
Move tests to test_categorical.py
krsnik93 May 19, 2019
7350472
Merge remote-tracking branch 'upstream/master'
krsnik93 May 20, 2019
0a949d5
Merge branch 'master' into GH24880
krsnik93 May 20, 2019
0e9f473
Parameterized tests for 'observed' kwarg on SeriesGroupBy
krsnik93 May 20, 2019
1ef54f4
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 20, 2019
cd481ad
Split test_groupby_series_observed to utilize fixtures better;Sort im…
krsnik93 May 20, 2019
a515caf
Sort imports in core/groupby/groupby.py
krsnik93 May 20, 2019
ff42dd7
Remove too specific fixtures and adjust tests
krsnik93 May 20, 2019
c22875c
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 21, 2019
cc0b725
Use literal values for indices in tests
krsnik93 May 21, 2019
629a144
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 22, 2019
e4fda22
Use MultiIndex.from_* to construct indices in tests
krsnik93 May 22, 2019
8cfa4a1
Wrap long lines
krsnik93 May 22, 2019
db176de
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 26, 2019
d520952
Enhance docstring for _reindex_output
krsnik93 May 26, 2019
3591dbc
Modify tests to reuse existing fixture
krsnik93 May 27, 2019
f97c8a1
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 27, 2019
d5c9c40
Refactor tests from a class to stand-alone functions
krsnik93 May 27, 2019
ad16db8
Simplify a test, add a docstring for the fixture and drop pd.* prefix…
krsnik93 May 28, 2019
7c525a1
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 28, 2019
e6bca5e
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 29, 2019
93 changes: 18 additions & 75 deletions pandas/core/groupby/generic.py
@@ -25,15 +25,14 @@
from pandas.core.dtypes.missing import isna, notna

import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy, _apply_docs, _transform_template)
from pandas.core.index import CategoricalIndex, Index, MultiIndex
from pandas.core.index import Index, MultiIndex
import pandas.core.indexes.base as ibase
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
@@ -834,9 +833,10 @@ def _wrap_output(self, output, index, names=None):
return Series(output, index=index, name=name)

def _wrap_aggregated_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
result = self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
return self._reindex_output(result)._convert(datetime=True)

def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
@@ -856,23 +856,28 @@ def _get_index():
return index

if isinstance(values[0], dict):
# GH #823
# GH #823 #24880
index = _get_index()
result = DataFrame(values, index=index).stack()
result = self._reindex_output(DataFrame(values, index=index))
# if self.observed is False,
# keep all-NaN rows created while re-indexing
result = result.stack(dropna=self.observed)
result.name = self._selection_name
return result

if isinstance(values[0], (Series, dict)):
if isinstance(values[0], Series):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(),
name=self._selection_name)
# GH #6265 #24880
result = Series(data=values,
index=_get_index(),
name=self._selection_name)
return self._reindex_output(result)

def _aggregate_named(self, func, *args, **kwargs):
result = OrderedDict()
@@ -1335,7 +1340,8 @@ def _gotitem(self, key, ndim, subset=None):
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(subset, selection=key,
grouper=self.grouper)
grouper=self.grouper,
observed=self.observed)

raise AssertionError("invalid ndim for _gotitem")

@@ -1407,69 +1413,6 @@ def _wrap_agged_blocks(self, items, blocks):

return self._reindex_output(result)._convert(datetime=True)

def _reindex_output(self, result):
"""
If we have categorical groupers, then we want to make sure that
we have a fully reindex-output to the levels. These may have not
participated in the groupings (e.g. may have all been
nan groups);

This can re-expand the output space
"""

# we need to re-expand the output space to accommodate all values
# whether observed or not in the cartesian product of our groups
groupings = self.grouper.groupings
if groupings is None:
return result
elif len(groupings) == 1:
return result

# if we only care about the observed values
# we are done
elif self.observed:
return result

# reindexing only applies to a Categorical grouper
elif not any(isinstance(ping.grouper, (Categorical, CategoricalIndex))
for ping in groupings):
return result

levels_list = [ping.group_index for ping in groupings]
index, _ = MultiIndex.from_product(
levels_list, names=self.grouper.names).sortlevel()

if self.as_index:
d = {self.obj._get_axis_name(self.axis): index, 'copy': False}
return result.reindex(**d)

# GH 13204
# Here, the categorical in-axis groupers, which need to be fully
# expanded, are columns in `result`. An idea is to do:
# result = result.set_index(self.grouper.names)
# .reindex(index).reset_index()
# but special care has to be taken because of possible not-in-axis
# groupers.
# So, we manually select and drop the in-axis grouper columns,
# reindex `result`, and then reset the in-axis grouper columns.

# Select in-axis groupers
in_axis_grps = ((i, ping.name) for (i, ping)
in enumerate(groupings) if ping.in_axis)
g_nums, g_names = zip(*in_axis_grps)

result = result.drop(labels=list(g_names), axis=1)

# Set a temp index and reindex (possibly expanding)
result = result.set_index(self.grouper.result_index
).reindex(index, copy=False)

# Reset in-axis grouper columns
# (using level numbers `g_nums` because level names may not be unique)
result = result.reset_index(level=g_nums)

return result.reset_index(drop=True)

def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
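
In the dict branch above, the intermediate DataFrame is reindexed to the full category product and then stacked with dropna=self.observed, so the all-NaN rows created by the reindex survive when observed is False. A standalone sketch of that stack step (values borrowed from the apply-dict test further down, not the PR's own code):

import numpy as np
import pandas as pd

# After reindexing to the full category product, unobserved groups become
# all-NaN rows; stack(dropna=True) would silently drop them again, while
# dropna=False keeps NaN entries for them.
frame = pd.DataFrame({'min': [1.0, 3.0, 4.0, np.nan],
                      'max': [2.0, 3.0, 4.0, np.nan]},
                     index=pd.MultiIndex.from_product([['x', 'y'], ['a', 'b']],
                                                      names=['a', 'b']))

frame.stack(dropna=True)   # 6 values: the ('y', 'b') rows vanish
frame.stack(dropna=False)  # 8 values: NaN entries for ('y', 'b') are kept
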
66 changes: 65 additions & 1 deletion pandas/core/groupby/groupby.py
@@ -17,6 +17,7 @@ class providing the base-class of operations.

import numpy as np

from pandas.core.arrays import Categorical
from pandas._config.config import option_context

from pandas._libs import Timestamp
@@ -42,7 +43,7 @@ class providing the base-class of operations.
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
from pandas.core.index import Index, MultiIndex
from pandas.core.index import Index, CategoricalIndex, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter

@@ -2301,6 +2302,69 @@ def tail(self, n=5):
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask]

def _reindex_output(self, result):
"""
If we have categorical groupers, then we want to make sure that
[Review comment (Contributor): can you update the doc-string with Parameters / Results; type things if you can]

we have a fully reindex-output to the levels. These may have not
participated in the groupings (e.g. may have all been
nan groups);

This can re-expand the output space
"""

# we need to re-expand the output space to accommodate all values
# whether observed or not in the cartesian product of our groups
groupings = self.grouper.groupings
if groupings is None:
return result
elif len(groupings) == 1:
return result

# if we only care about the observed values
# we are done
elif self.observed:
return result

# reindexing only applies to a Categorical grouper
elif not any(isinstance(ping.grouper, (Categorical, CategoricalIndex))
for ping in groupings):
return result

levels_list = [ping.group_index for ping in groupings]
index, _ = MultiIndex.from_product(
levels_list, names=self.grouper.names).sortlevel()

if self.as_index:
d = {self.obj._get_axis_name(self.axis): index, 'copy': False}
return result.reindex(**d)

# GH 13204
# Here, the categorical in-axis groupers, which need to be fully
# expanded, are columns in `result`. An idea is to do:
# result = result.set_index(self.grouper.names)
# .reindex(index).reset_index()
# but special care has to be taken because of possible not-in-axis
# groupers.
# So, we manually select and drop the in-axis grouper columns,
# reindex `result`, and then reset the in-axis grouper columns.

# Select in-axis groupers
in_axis_grps = ((i, ping.name) for (i, ping)
in enumerate(groupings) if ping.in_axis)
g_nums, g_names = zip(*in_axis_grps)

result = result.drop(labels=list(g_names), axis=1)

# Set a temp index and reindex (possibly expanding)
result = result.set_index(self.grouper.result_index
).reindex(index, copy=False)

# Reset in-axis grouper columns
# (using level numbers `g_nums` because level names may not be unique)
result = result.reset_index(level=g_nums)

return result.reset_index(drop=True)


GroupBy._add_numeric_operations()

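
Behaviourally, the relocated _reindex_output builds the sorted cartesian product of the group levels and reindexes the aggregated result onto it (only when there are multiple groupings, at least one grouper is categorical, and observed is False). A standalone sketch of that core reindex idea, using plain lists instead of the grouper's CategoricalIndex levels:

import pandas as pd

# Reindex the observed-only result onto the cartesian product of all group
# levels, so the missing ('y', 'b') combination gets a NaN slot.
observed_result = pd.Series([3, 3, 4],
                            index=pd.MultiIndex.from_tuples(
                                [('x', 'a'), ('x', 'b'), ('y', 'a')],
                                names=['a', 'b']),
                            name='c')

full_index, _ = pd.MultiIndex.from_product([['x', 'y'], ['a', 'b']],
                                           names=['a', 'b']).sortlevel()
expanded = observed_result.reindex(full_index)   # ('y', 'b') -> NaN
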
64 changes: 64 additions & 0 deletions pandas/tests/groupby/test_categorical.py
@@ -963,3 +963,67 @@ def test_shift(fill_value):
categories=['a', 'b', 'c', 'd'], ordered=False)
res = ct.shift(1, fill_value=fill_value)
assert_equal(res, expected)


def test_groupby_series_observed():
# GH 24880
df = DataFrame({'a': ['x', 'x', 'x', 'y'],
'b': ['a', 'a', 'b', 'a'],
'c': [1, 2, 3, 4]})
df['a'] = df['a'].astype('category')
df['b'] = df['b'].astype('category')

# test .agg and .apply when observed == False
[Review comment (Member): Rather than duplicate things in individual sections here should try and parametrize on agg/apply and True/False (for observed)]

lvls = [CategoricalIndex(['x', 'y'], categories=['x', 'y'], ordered=False),
CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=False)]
index, _ = MultiIndex.from_product(lvls, names=['a', 'b']).sortlevel()
expected = pd.Series(data=[3, 3, 4, np.nan], index=index, name='c')
actual_agg = df.groupby(['a', 'b']).c.agg(sum)
actual_apply = df.groupby(['a', 'b']).c.apply(sum)
assert_series_equal(expected, actual_agg)
assert_series_equal(expected, actual_apply)

# test .agg when observed == True
index = MultiIndex.from_frame(df[['a', 'b']].drop_duplicates())
expected = pd.Series([3, 3, 4], index=index, name='c')
actual = df.groupby(['a', 'b'], observed=True).c.agg(sum)
assert_series_equal(expected, actual)

# test .apply when observed == True
index = MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'a')],
names=('a', 'b'))
expected = pd.Series([3, 3, 4], index=index, name='c')
actual = df.groupby(['a', 'b'], observed=True).c.apply(sum)
assert_series_equal(expected, actual)
[Review comment (Member): Can you change actual to result and do assert_series_equal(result, expected)?]



def test_groupby_series_observed_apply_dict():
# GH 24880
df = DataFrame({'a': ['x', 'x', 'x', 'y'],
'b': ['a', 'a', 'b', 'a'],
'c': [1, 2, 3, 4]})
df['a'] = df['a'].astype('category')
df['b'] = df['b'].astype('category')

# observed == False
[Review comment (Member): Parametrize on True/False here as well. There should already be an observed fixture defined in the top level conftest.py that you can use]
[Author reply (krsnik93, May 20, 2019): I couldn't make use of the existing fixture due to other parameters. I also included the cases for None, in case the default changes. The tests look much cleaner now, but there's a couple of fixtures that probably won't be used often.]

lvls = [CategoricalIndex(['x', 'y'], categories=['x', 'y'], ordered=False),
CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=False),
Index(['min', 'max'])]
index, _ = MultiIndex.from_product(lvls,
names=['a', 'b', None]).sortlevel()
expected = pd.Series(data=[2, 1, 3, 3, 4, 4, np.nan, np.nan],
index=index,
name='c')
actual = df.groupby(['a', 'b']).c.apply(lambda x: {'min': x.min(),
'max': x.max()})
assert_series_equal(expected, actual)

# observed == True
index = MultiIndex.from_tuples([('x', 'a', 'max'), ('x', 'a', 'min'),
('x', 'b', 'max'), ('x', 'b', 'min'),
('y', 'a', 'max'), ('y', 'a', 'min')],
names=('a', 'b', None))
expected = pd.Series(data=[2, 1, 3, 3, 4, 4], index=index, name='c')
actual = df.groupby(['a', 'b'], observed=True).c.\
apply(lambda x: {'min': x.min(), 'max': x.max()})
assert_series_equal(expected, actual)
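
Following up on the review comments asking for parametrization, here is a hedged sketch of what a parametrized .agg test could look like. The df_cat fixture name is an assumption (the merged PR's final test layout, and use of the top-level observed fixture, may differ), the expected values are taken from the tests above, and the assert_series_equal import path is the one pandas used at the time:

import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal


@pytest.fixture
def df_cat():
    # Hypothetical fixture mirroring the frame built inline in the tests above.
    df = pd.DataFrame({'a': ['x', 'x', 'x', 'y'],
                       'b': ['a', 'a', 'b', 'a'],
                       'c': [1, 2, 3, 4]})
    df['a'] = df['a'].astype('category')
    df['b'] = df['b'].astype('category')
    return df


@pytest.mark.parametrize('observed', [False, True])
def test_groupby_series_observed_agg(df_cat, observed):
    # GH 24880: collapse the separate observed==False / observed==True
    # .agg sections into a single parametrized test.
    if observed:
        index = pd.MultiIndex.from_frame(df_cat[['a', 'b']].drop_duplicates())
        data = [3, 3, 4]
    else:
        lvls = [pd.CategoricalIndex(['x', 'y'], categories=['x', 'y']),
                pd.CategoricalIndex(['a', 'b'], categories=['a', 'b'])]
        index, _ = pd.MultiIndex.from_product(lvls,
                                              names=['a', 'b']).sortlevel()
        data = [3, 3, 4, np.nan]
    expected = pd.Series(data=data, index=index, name='c')

    result = df_cat.groupby(['a', 'b'], observed=observed)['c'].agg(sum)
    assert_series_equal(result, expected)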