Skip to content

Fix 'observed' kwarg not doing anything on SeriesGroupBy #26463

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 24 commits into from
May 30, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
a5d6d1a
Fix 'observed' kwarg not doing anything on SeriesGroupBy
krsnik93 May 19, 2019
41f49f4
Merge branch 'GH24880'
krsnik93 May 19, 2019
2575c41
Wrap long lines
krsnik93 May 19, 2019
1c02d9f
Move tests to test_categorical.py
krsnik93 May 19, 2019
7350472
Merge remote-tracking branch 'upstream/master'
krsnik93 May 20, 2019
0a949d5
Merge branch 'master' into GH24880
krsnik93 May 20, 2019
0e9f473
Parameterized tests for 'observed' kwarg on SeriesGroupBy
krsnik93 May 20, 2019
1ef54f4
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 20, 2019
cd481ad
Split test_groupby_series_observed to utilize fixtures better;Sort im…
krsnik93 May 20, 2019
a515caf
Sort imports in core/groupby/groupby.py
krsnik93 May 20, 2019
ff42dd7
Remove too specific fixtures and adjust tests
krsnik93 May 20, 2019
c22875c
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 21, 2019
cc0b725
Use literal values for indices in tests
krsnik93 May 21, 2019
629a144
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 22, 2019
e4fda22
Use MultiIndex.from_* to construct indices in tests
krsnik93 May 22, 2019
8cfa4a1
Wrap long lines
krsnik93 May 22, 2019
db176de
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 26, 2019
d520952
Enhance docstring for _reindex_output
krsnik93 May 26, 2019
3591dbc
Modify tests to reuse existing fixture
krsnik93 May 27, 2019
f97c8a1
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 27, 2019
d5c9c40
Refactor tests from a class to stand-alone functions
krsnik93 May 27, 2019
ad16db8
Simplify a test, add a docstring for the fixture and drop pd.* prefix…
krsnik93 May 28, 2019
7c525a1
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 28, 2019
e6bca5e
Merge remote-tracking branch 'upstream/master' into GH24880
krsnik93 May 29, 2019
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions doc/source/whatsnew/v0.25.0.rst
Original file line number Diff line number Diff line change
Expand Up @@ -512,6 +512,7 @@ Groupby/Resample/Rolling
- Bug in :func:`pandas.core.groupby.GroupBy.agg` when applying an aggregation function to timezone aware data (:issue:`23683`)
- Bug in :func:`pandas.core.groupby.GroupBy.first` and :func:`pandas.core.groupby.GroupBy.last` where timezone information would be dropped (:issue:`21603`)
- Bug in :func:`pandas.core.groupby.GroupBy.size` when grouping only NA values (:issue:`23050`)
- Bug in :func:`Series.groupby` where ``observed`` kwarg was previously ignored (:issue:`24880`)
- Bug in :func:`Series.groupby` where using ``groupby`` with a :class:`MultiIndex` Series with a list of labels equal to the length of the series caused incorrect grouping (:issue:`25704`)
- Ensured that ordering of outputs in ``groupby`` aggregation functions is consistent across all versions of Python (:issue:`25692`)
- Ensured that result group order is correct when grouping on an ordered ``Categorical`` and specifying ``observed=True`` (:issue:`25871`, :issue:`25167`)
Expand Down
93 changes: 18 additions & 75 deletions pandas/core/groupby/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,15 +25,14 @@
from pandas.core.dtypes.missing import isna, notna

import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy, _apply_docs, _transform_template)
from pandas.core.index import CategoricalIndex, Index, MultiIndex
from pandas.core.index import Index, MultiIndex
import pandas.core.indexes.base as ibase
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
Expand Down Expand Up @@ -834,9 +833,10 @@ def _wrap_output(self, output, index, names=None):
return Series(output, index=index, name=name)

def _wrap_aggregated_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
result = self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
return self._reindex_output(result)._convert(datetime=True)

def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
Expand All @@ -856,23 +856,28 @@ def _get_index():
return index

if isinstance(values[0], dict):
# GH #823
# GH #823 #24880
index = _get_index()
result = DataFrame(values, index=index).stack()
result = self._reindex_output(DataFrame(values, index=index))
# if self.observed is False,
# keep all-NaN rows created while re-indexing
result = result.stack(dropna=self.observed)
result.name = self._selection_name
return result

if isinstance(values[0], (Series, dict)):
if isinstance(values[0], Series):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(),
name=self._selection_name)
# GH #6265 #24880
result = Series(data=values,
index=_get_index(),
name=self._selection_name)
return self._reindex_output(result)

def _aggregate_named(self, func, *args, **kwargs):
result = OrderedDict()
Expand Down Expand Up @@ -1335,7 +1340,8 @@ def _gotitem(self, key, ndim, subset=None):
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(subset, selection=key,
grouper=self.grouper)
grouper=self.grouper,
observed=self.observed)

raise AssertionError("invalid ndim for _gotitem")

Expand Down Expand Up @@ -1407,69 +1413,6 @@ def _wrap_agged_blocks(self, items, blocks):

return self._reindex_output(result)._convert(datetime=True)

def _reindex_output(self, result):
    """
    Re-expand the output space to the full cartesian product of the
    group levels when grouping on categoricals.

    If we have categorical groupers, then we want to make sure that
    the output is fully re-indexed to all the levels: some levels may
    not have participated in the groupings (e.g. may have all been
    nan groups), and with ``observed=False`` they must still appear.

    This can re-expand the output space.

    Parameters
    ----------
    result : Series or DataFrame
        Object resulting from grouping and applying an operation.

    Returns
    -------
    Series or DataFrame
        ``result``, possibly reindexed to include unobserved groups.
    """

    # we need to re-expand the output space to accommodate all values
    # whether observed or not in the cartesian product of our groups
    groupings = self.grouper.groupings
    # no groupings, or a single grouping: nothing to expand
    if groupings is None:
        return result
    elif len(groupings) == 1:
        return result

    # if we only care about the observed values
    # we are done
    elif self.observed:
        return result

    # reindexing only applies to a Categorical grouper
    elif not any(isinstance(ping.grouper, (Categorical, CategoricalIndex))
                 for ping in groupings):
        return result

    # cartesian product of every grouping's full level set, sorted
    levels_list = [ping.group_index for ping in groupings]
    index, _ = MultiIndex.from_product(
        levels_list, names=self.grouper.names).sortlevel()

    if self.as_index:
        # groups live on the result's index; reindex along that axis
        d = {self.obj._get_axis_name(self.axis): index, 'copy': False}
        return result.reindex(**d)

    # GH 13204
    # Here, the categorical in-axis groupers, which need to be fully
    # expanded, are columns in `result`. An idea is to do:
    # result = result.set_index(self.grouper.names)
    #                .reindex(index).reset_index()
    # but special care has to be taken because of possible not-in-axis
    # groupers.
    # So, we manually select and drop the in-axis grouper columns,
    # reindex `result`, and then reset the in-axis grouper columns.

    # Select in-axis groupers
    in_axis_grps = ((i, ping.name) for (i, ping)
                    in enumerate(groupings) if ping.in_axis)
    g_nums, g_names = zip(*in_axis_grps)

    result = result.drop(labels=list(g_names), axis=1)

    # Set a temp index and reindex (possibly expanding)
    result = result.set_index(self.grouper.result_index
                              ).reindex(index, copy=False)

    # Reset in-axis grouper columns
    # (using level numbers `g_nums` because level names may not be unique)
    result = result.reset_index(level=g_nums)

    return result.reset_index(drop=True)

def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
Expand Down
76 changes: 75 additions & 1 deletion pandas/core/groupby/groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,13 +36,14 @@ class providing the base-class of operations.
from pandas.api.types import (
is_datetime64_dtype, is_integer_dtype, is_object_dtype)
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical
from pandas.core.base import (
DataError, GroupByError, PandasObject, SelectionMixin, SpecificationError)
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
from pandas.core.index import Index, MultiIndex
from pandas.core.index import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter

Expand Down Expand Up @@ -2301,6 +2302,79 @@ def tail(self, n=5):
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask]

def _reindex_output(self, output):
    """
    Expand ``output`` to cover every combination of group levels when
    grouping on categoricals with ``observed=False``.

    When at least one grouper is categorical and more than one grouping
    is in play, groups absent from the data must still appear in the
    result. This method reindexes ``output`` onto the sorted cartesian
    product of all group levels, leaving it untouched when expansion is
    unnecessary (fewer than two groupings, ``observed=True``, or no
    categorical grouper).

    Parameters
    ----------
    output : Series or DataFrame
        Object resulting from grouping and applying an operation.

    Returns
    -------
    Series or DataFrame
        Object (potentially) re-indexed to include all possible groups.
    """
    groupings = self.grouper.groupings

    # Early exits: nothing to expand in any of these cases.
    if groupings is None or len(groupings) == 1:
        return output
    if self.observed:
        # caller only wants groups that were actually observed
        return output
    if not any(isinstance(ping.grouper, (Categorical, CategoricalIndex))
               for ping in groupings):
        # reindexing only applies to a Categorical grouper
        return output

    # Sorted cartesian product of every grouping's full level set.
    full_index, _ = MultiIndex.from_product(
        [ping.group_index for ping in groupings],
        names=self.grouper.names).sortlevel()

    if self.as_index:
        # Groups live on the result's index; reindex along that axis.
        reindex_kwargs = {self.obj._get_axis_name(self.axis): full_index,
                          'copy': False}
        return output.reindex(**reindex_kwargs)

    # GH 13204
    # With as_index=False the categorical in-axis groupers are columns
    # of `output`. Simply doing
    #   output.set_index(self.grouper.names).reindex(index).reset_index()
    # would mishandle possible not-in-axis groupers, so instead we drop
    # the in-axis grouper columns, reindex on a temporary index, and
    # then restore the in-axis grouper columns.

    # Locate the in-axis groupers (position + column name).
    in_axis = [(i, ping.name)
               for i, ping in enumerate(groupings) if ping.in_axis]
    g_nums, g_names = zip(*in_axis)

    # Drop grouper columns, set a temp index and reindex (possibly
    # expanding to the full product of levels).
    expanded = (output.drop(labels=list(g_names), axis=1)
                .set_index(self.grouper.result_index)
                .reindex(full_index, copy=False))

    # Restore in-axis grouper columns, addressing levels by number
    # because level names may not be unique.
    expanded = expanded.reset_index(level=g_nums)

    return expanded.reset_index(drop=True)


GroupBy._add_numeric_operations()

Expand Down
Loading