Extends DataFrame.groupby overloads to recognize some scalar index types #679

Merged · 2 commits · May 6, 2023
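
For context, a minimal usage sketch of the typing behavior this PR targets, with illustrative DataFrame and index names that are not taken from the diff: grouping by a DatetimeIndex should give Timestamp-typed group keys, while grouping by a MultiIndex should give tuple keys.

import pandas as pd

df = pd.DataFrame({"a": [1, 1, 2], "b": [3, 4, 4], "v": [1.0, 2.0, 3.0]})

# Grouping by a DatetimeIndex: group keys should now be typed as pd.Timestamp.
dt_index = pd.DatetimeIndex(pd.date_range("2023-01-01", periods=3))
for ts, frame in df.groupby(dt_index):
    pass  # ts is a pd.Timestamp, frame is a pd.DataFrame

# Grouping by a MultiIndex (or a list of labels): group keys should be tuples.
multi = pd.MultiIndex.from_frame(df[["a", "b"]])
for key, frame in df.groupby(multi):
    pass  # key is a tuple, frame is a pd.DataFrame
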
23 changes: 23 additions & 0 deletions pandas-stubs/_typing.pyi
@@ -404,6 +404,29 @@ Function: TypeAlias = np.ufunc | Callable[..., Any]
# shared HashableT and HashableT#. This one can be used if the identical
# type is needed in a function that uses GroupByObjectNonScalar
_HashableTa = TypeVar("_HashableTa", bound=Hashable)
ByT = TypeVar(
"ByT",
str,
bytes,
datetime.date,
datetime.datetime,
datetime.timedelta,
np.datetime64,
np.timedelta64,
bool,
int,
float,
complex,
Timestamp,
Timedelta,
Scalar,
Period,
Interval[int],
Interval[float],
Interval[Timestamp],
Interval[Timedelta],
tuple,
)
GroupByObjectNonScalar: TypeAlias = (
tuple
| list[_HashableTa]
95 changes: 89 additions & 6 deletions pandas-stubs/core/frame.pyi
@@ -20,18 +20,22 @@ from typing import (
from matplotlib.axes import Axes as PlotAxes
import numpy as np
from pandas import (
Period,
Timedelta,
Timestamp,
)
from pandas.core.arraylike import OpsMixin
from pandas.core.generic import NDFrame
from pandas.core.groupby.generic import (
_DataFrameGroupByNonScalar,
_DataFrameGroupByScalar,
)
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.core.groupby.grouper import Grouper
from pandas.core.indexers import BaseIndexer
from pandas.core.indexes.base import Index
from pandas.core.indexes.category import CategoricalIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.interval import IntervalIndex
from pandas.core.indexes.multi import MultiIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexing import (
_iLocIndexer,
_IndexSliceTuple,
@@ -82,6 +86,7 @@ from pandas._typing import (
IndexLabel,
IndexType,
IntervalClosedType,
IntervalT,
JoinHow,
JsonFrameOrient,
Label,
@@ -1011,7 +1016,85 @@ class DataFrame(NDFrame, OpsMixin):
squeeze: _bool = ...,
observed: _bool = ...,
dropna: _bool = ...,
) -> _DataFrameGroupByScalar: ...
) -> DataFrameGroupBy[Scalar]: ...
@overload
def groupby( # type: ignore[misc] # pyright: ignore[reportOverlappingOverload]
self,
by: DatetimeIndex,
axis: Axis = ...,
level: Level | None = ...,
as_index: _bool = ...,
sort: _bool = ...,
group_keys: _bool = ...,
squeeze: _bool = ...,
observed: _bool = ...,
dropna: _bool = ...,
) -> DataFrameGroupBy[Timestamp]: ...
@overload
def groupby( # type: ignore[misc]
self,
by: TimedeltaIndex,
axis: Axis = ...,
level: Level | None = ...,
as_index: _bool = ...,
sort: _bool = ...,
group_keys: _bool = ...,
squeeze: _bool = ...,
observed: _bool = ...,
dropna: _bool = ...,
) -> DataFrameGroupBy[Timedelta]: ...
@overload
def groupby( # type: ignore[misc]
self,
by: PeriodIndex,
axis: Axis = ...,
level: Level | None = ...,
as_index: _bool = ...,
sort: _bool = ...,
group_keys: _bool = ...,
squeeze: _bool = ...,
observed: _bool = ...,
dropna: _bool = ...,
) -> DataFrameGroupBy[Period]: ...
@overload
def groupby( # type: ignore[misc]
self,
by: IntervalIndex[IntervalT],
axis: Axis = ...,
level: Level | None = ...,
as_index: _bool = ...,
sort: _bool = ...,
group_keys: _bool = ...,
squeeze: _bool = ...,
observed: _bool = ...,
dropna: _bool = ...,
) -> DataFrameGroupBy[IntervalT]: ...
@overload
def groupby(
self,
by: MultiIndex,
axis: Axis = ...,
level: Level | None = ...,
as_index: _bool = ...,
sort: _bool = ...,
group_keys: _bool = ...,
squeeze: _bool = ...,
observed: _bool = ...,
dropna: _bool = ...,
) -> DataFrameGroupBy[tuple]: ...
@overload
def groupby(
self,
by: CategoricalIndex | Index,
axis: Axis = ...,
level: Level | None = ...,
as_index: _bool = ...,
sort: _bool = ...,
group_keys: _bool = ...,
squeeze: _bool = ...,
observed: _bool = ...,
dropna: _bool = ...,
) -> DataFrameGroupBy[Any]: ...
@overload
def groupby(
self,
@@ -1024,7 +1107,7 @@ class DataFrame(NDFrame, OpsMixin):
squeeze: _bool = ...,
observed: _bool = ...,
dropna: _bool = ...,
) -> _DataFrameGroupByNonScalar: ...
) -> DataFrameGroupBy[tuple]: ...
def pivot(
self,
*,
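
A hedged sketch of what the new frame.pyi overloads are intended to let a type checker infer; the names below are illustrative, and the inferred types assume these stubs are the ones being resolved:

import pandas as pd

df = pd.DataFrame({"v": [1.0, 2.0, 3.0]})

# The IntervalIndex[IntervalT] overload should propagate the interval subtype,
# e.g. an IntervalIndex of Interval[int] yields Interval[int] group keys.
interval_index = pd.IntervalIndex.from_breaks([0, 1, 2, 3])
for interval, frame in df.groupby(interval_index):
    pass  # interval: pd.Interval[int]

# A plain Index (or a CategoricalIndex) stays ambiguous, so the fallback
# overload types the group keys as Any.
for key, frame in df.groupby(df.index):
    pass  # key: Any
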
12 changes: 4 additions & 8 deletions pandas-stubs/core/groupby/generic.pyi
@@ -30,6 +30,7 @@ from pandas._typing import (
AggFuncTypeBase,
AggFuncTypeFrame,
Axis,
ByT,
Level,
ListLike,
RandomState,
@@ -146,13 +147,7 @@ class SeriesGroupBy(GroupBy, Generic[S1]):
def idxmax(self, axis: Axis = ..., skipna: bool = ...) -> Series: ...
def idxmin(self, axis: Axis = ..., skipna: bool = ...) -> Series: ...

class _DataFrameGroupByScalar(DataFrameGroupBy):
def __iter__(self) -> Iterator[tuple[Scalar, DataFrame]]: ...

class _DataFrameGroupByNonScalar(DataFrameGroupBy):
def __iter__(self) -> Iterator[tuple[tuple, DataFrame]]: ...

class DataFrameGroupBy(GroupBy):
class DataFrameGroupBy(GroupBy, Generic[ByT]):
def any(self, skipna: bool = ...) -> DataFrame: ...
def all(self, skipna: bool = ...) -> DataFrame: ...
# error: Overload 3 for "apply" will never be used because its parameters overlap overload 1
@@ -178,7 +173,7 @@
@overload
def __getitem__(self, item: str) -> SeriesGroupBy: ...
@overload
def __getitem__(self, item: list[str]) -> DataFrameGroupBy: ...
def __getitem__(self, item: list[str]) -> DataFrameGroupBy[ByT]: ...
def count(self) -> DataFrame: ...
def boxplot(
self,
@@ -364,3 +359,4 @@ class DataFrameGroupBy(GroupBy):
dropna: bool = ...,
) -> Series[float]: ...
def __getattr__(self, name: str) -> SeriesGroupBy: ...
def __iter__(self) -> Iterator[tuple[ByT, DataFrame]]: ...
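
A brief, hedged illustration of what parameterizing DataFrameGroupBy over ByT is meant to achieve (illustrative variable names, not part of the diff): the key type inferred at groupby time should survive both column selection and iteration.

import pandas as pd

df = pd.DataFrame({"date": pd.date_range("2020-01-01", periods=5), "days": 1})
grouped = df.groupby(pd.PeriodIndex(df["date"], freq="M"))
# With these stubs, `grouped` should be inferred as DataFrameGroupBy[pd.Period].

sub = grouped[["days"]]
# __getitem__ with a list of columns keeps ByT, so `sub` should still be
# DataFrameGroupBy[pd.Period], and iteration yields (Period, DataFrame) pairs.
for period, frame in sub:
    pass
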
94 changes: 94 additions & 0 deletions tests/test_frame.py
@@ -1972,13 +1972,107 @@ def test_groupby_result() -> None:
check(assert_type(index2, Scalar), int)
check(assert_type(value2, pd.DataFrame), pd.DataFrame)

# GH 674
# grouping by pd.MultiIndex should always resolve to a tuple as well
multi_index = pd.MultiIndex.from_frame(df[["a", "b"]])
iterator3 = df.groupby(multi_index).__iter__()
assert_type(iterator3, Iterator[Tuple[Tuple, pd.DataFrame]])
index3, value3 = next(iterator3)
assert_type((index3, value3), Tuple[Tuple, pd.DataFrame])

check(assert_type(index3, Tuple), tuple, int)
check(assert_type(value3, pd.DataFrame), pd.DataFrame)

# Want to make sure these cases are differentiated
for (k1, k2), g in df.groupby(["a", "b"]):
pass

for kk, g in df.groupby("a"):
pass

for (k1, k2), g in df.groupby(multi_index):
pass


def test_groupby_result_for_scalar_indexes() -> None:
# GH 674
dates = pd.date_range("2020-01-01", "2020-12-31")
df = pd.DataFrame({"date": dates, "days": 1})
period_index = pd.PeriodIndex(df.date, freq="M")
iterator = df.groupby(period_index).__iter__()
assert_type(iterator, Iterator[Tuple[pd.Period, pd.DataFrame]])
index, value = next(iterator)
assert_type((index, value), Tuple[pd.Period, pd.DataFrame])

check(assert_type(index, pd.Period), pd.Period)
check(assert_type(value, pd.DataFrame), pd.DataFrame)

dt_index = pd.DatetimeIndex(dates)
iterator2 = df.groupby(dt_index).__iter__()
assert_type(iterator2, Iterator[Tuple[pd.Timestamp, pd.DataFrame]])
index2, value2 = next(iterator2)
assert_type((index2, value2), Tuple[pd.Timestamp, pd.DataFrame])

check(assert_type(index2, pd.Timestamp), pd.Timestamp)
check(assert_type(value2, pd.DataFrame), pd.DataFrame)

tdelta_index = pd.TimedeltaIndex(dates - pd.Timestamp("2020-01-01"))
iterator3 = df.groupby(tdelta_index).__iter__()
assert_type(iterator3, Iterator[Tuple[pd.Timedelta, pd.DataFrame]])
index3, value3 = next(iterator3)
assert_type((index3, value3), Tuple[pd.Timedelta, pd.DataFrame])

check(assert_type(index3, pd.Timedelta), pd.Timedelta)
check(assert_type(value3, pd.DataFrame), pd.DataFrame)

intervals: list[pd.Interval[pd.Timestamp]] = [
pd.Interval(date, date + pd.DateOffset(days=1), closed="left") for date in dates
]
interval_index = pd.IntervalIndex(intervals)
assert_type(interval_index, "pd.IntervalIndex[pd.Interval[pd.Timestamp]]")
iterator4 = df.groupby(interval_index).__iter__()
assert_type(iterator4, Iterator[Tuple["pd.Interval[pd.Timestamp]", pd.DataFrame]])
index4, value4 = next(iterator4)
assert_type((index4, value4), Tuple["pd.Interval[pd.Timestamp]", pd.DataFrame])

check(assert_type(index4, "pd.Interval[pd.Timestamp]"), pd.Interval)
check(assert_type(value4, pd.DataFrame), pd.DataFrame)

for p, g in df.groupby(period_index):
pass

for dt, g in df.groupby(dt_index):
pass

for tdelta, g in df.groupby(tdelta_index):
pass

for interval, g in df.groupby(interval_index):
pass


def test_groupby_result_for_ambiguous_indexes() -> None:
# GH 674
df = pd.DataFrame({"a": [0, 1, 2], "b": [4, 5, 6], "c": [7, 8, 9]})
# this will use pd.Index which is ambiguous
iterator = df.groupby(df.index).__iter__()
assert_type(iterator, Iterator[Tuple[Any, pd.DataFrame]])
index, value = next(iterator)
assert_type((index, value), Tuple[Any, pd.DataFrame])

check(assert_type(index, Any), int)
check(assert_type(value, pd.DataFrame), pd.DataFrame)

# categorical indexes are also ambiguous
categorical_index = pd.CategoricalIndex(df.a)
iterator2 = df.groupby(categorical_index).__iter__()
assert_type(iterator2, Iterator[Tuple[Any, pd.DataFrame]])
index2, value2 = next(iterator2)
assert_type((index2, value2), Tuple[Any, pd.DataFrame])

check(assert_type(index2, Any), int)
check(assert_type(value2, pd.DataFrame), pd.DataFrame)


def test_setitem_list():
# GH 153