Skip to content

REF: simplify python_agg_general #51447

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged — 1 commit merged on Feb 17, 2023.
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
68 changes: 53 additions & 15 deletions pandas/core/groupby/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -250,14 +250,28 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()

if self.ngroups == 0:
# e.g. test_evaluate_with_empty_groups without any groups to
# iterate over, we have no output on which to do dtype
# inference. We default to using the existing dtype.
# xref GH#51445
obj = self._obj_with_exclusions
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=obj.dtype,
)

if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)

try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_groupby.test_basic
# KeyError raised in test_groupby.test_basic is bc the func does
# a dictionary lookup on group.name, but group name is not
# pinned in _python_agg_general, only in _aggregate_named
result = self._aggregate_named(func, *args, **kwargs)

# result is a dict whose keys are the elements of result_index
Expand All @@ -267,6 +281,15 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)

agg = aggregate

def _python_agg_general(self, func, *args, **kwargs):
    """
    Aggregate the selected Series group-by-group with a Python-level callable.

    Parameters
    ----------
    func : callable
        Aggregation function applied to each group; builtins (sum, min, ...)
        are first mapped to their pandas equivalents via com.is_builtin_func.
    *args, **kwargs
        Extra arguments forwarded to ``func`` on every call.

    Returns
    -------
    The wrapped aggregated output (shape/type determined by
    ``_wrap_aggregated_output``).
    """
    func = com.is_builtin_func(func)

    def wrapped(group):
        # Bind *args/**kwargs so agg_series can call with the group alone.
        return func(group, *args, **kwargs)

    target = self._obj_with_exclusions
    agg_values = self.grouper.agg_series(target, wrapped)
    aggregated = target._constructor(agg_values, name=target.name)
    return self._wrap_aggregated_output(aggregated)

def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame:
if isinstance(arg, dict):
if self.as_index:
Expand Down Expand Up @@ -308,18 +331,6 @@ def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame:
output = self._reindex_output(output)
return output

def _indexed_output_to_ndframe(
    self, output: Mapping[base.OutputKey, ArrayLike]
) -> Series:
    """
    Wrap the dict result of a GroupBy aggregation into a Series.
    """
    # A SeriesGroupBy aggregation yields exactly one output column,
    # so the mapping must hold a single entry.
    assert len(output) == 1
    (values,) = output.values()
    wrapped = self.obj._constructor(values)
    wrapped.name = self.obj.name
    return wrapped

def _wrap_applied_output(
self,
data: Series,
Expand Down Expand Up @@ -1319,6 +1330,31 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)

agg = aggregate

def _python_agg_general(self, func, *args, **kwargs):
    """
    Aggregate each selected column group-by-group with a Python-level callable.

    Parameters
    ----------
    func : callable
        Aggregation function applied to each group; builtins are first mapped
        to their pandas equivalents via com.is_builtin_func.
    *args, **kwargs
        Extra arguments forwarded to ``func`` on every call.

    Returns
    -------
    The wrapped aggregated output (shape/type determined by
    ``_wrap_aggregated_output``).
    """
    func = com.is_builtin_func(func)

    def wrapped(group):
        # Bind *args/**kwargs so agg_series can call with the group alone.
        return func(group, *args, **kwargs)

    if self.ngroups == 0:
        # e.g. test_evaluate_with_empty_groups different path gets different
        # result dtype in empty case.
        return self._python_apply_general(wrapped, self._selected_obj, is_agg=True)

    # One entry per selected "column" slice (exclusions already dropped
    # by _iterate_slices).
    output: dict[base.OutputKey, ArrayLike] = {
        base.OutputKey(label=sliced.name, position=pos): self.grouper.agg_series(
            sliced, wrapped
        )
        for pos, sliced in enumerate(self._iterate_slices())
    }

    if not output:
        # e.g. test_margins_no_values_no_cols
        return self._python_apply_general(wrapped, self._selected_obj)

    frame = self._indexed_output_to_ndframe(output)
    return self._wrap_aggregated_output(frame)

def _iterate_slices(self) -> Iterable[Series]:
obj = self._selected_obj
if self.axis == 1:
Expand Down Expand Up @@ -1885,7 +1921,9 @@ def nunique(self, dropna: bool = True) -> DataFrame:

if self.axis != 0:
# see test_groupby_crash_on_nunique
return self._python_agg_general(lambda sgb: sgb.nunique(dropna))
return self._python_apply_general(
lambda sgb: sgb.nunique(dropna), self._obj_with_exclusions, is_agg=True
)

obj = self._obj_with_exclusions
results = self._apply_to_column_groupbys(
Expand Down
28 changes: 0 additions & 28 deletions pandas/core/groupby/groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -1409,34 +1409,6 @@ def _python_apply_general(
is_transform,
)

# TODO: I (jbrockmendel) think this should be equivalent to doing grouped_reduce
# on _agg_py_fallback, but trying that here fails a bunch of tests 2023-02-07.
@final
def _python_agg_general(self, func, *args, **kwargs):
    """
    Aggregate with a Python-level callable, one output entry per slice.

    ``func`` is applied group-by-group via ``grouper.agg_series``; builtin
    callables (sum, min, ...) are first mapped to their pandas equivalents.
    ``*args``/``**kwargs`` are forwarded to ``func`` on every call.

    Two fallback paths delegate to ``_python_apply_general`` instead:
    the zero-group case and the zero-slice (no output) case.
    """
    func = com.is_builtin_func(func)
    # Freeze *args/**kwargs so agg_series can invoke with the group alone.
    f = lambda x: func(x, *args, **kwargs)

    # Iterate through "columns" (excluding exclusions) to populate the
    # output dict, keyed by label and ordinal position.
    output: dict[base.OutputKey, ArrayLike] = {}

    if self.ngroups == 0:
        # e.g. test_evaluate_with_empty_groups: with no groups to iterate,
        # the apply path is used because the agg path would produce a
        # different result dtype in the empty case.
        return self._python_apply_general(f, self._selected_obj, is_agg=True)

    for idx, obj in enumerate(self._iterate_slices()):
        name = obj.name
        result = self.grouper.agg_series(obj, f)
        key = base.OutputKey(label=name, position=idx)
        output[key] = result

    if not output:
        # No slices produced any output; fall back to the apply path.
        # e.g. test_groupby_crash_on_nunique, test_margins_no_values_no_cols
        return self._python_apply_general(f, self._selected_obj)

    res = self._indexed_output_to_ndframe(output)
    return self._wrap_aggregated_output(res)

@final
def _agg_general(
self,
Expand Down