@@ -149,7 +149,7 @@ class providing the base-class of operations.
149
149
150
150
from pandas.core.resample import Resampler
151
151
from pandas.core.window import (
152
- ExpandingGroupby,
152
+ ExpandingGroupBy,
153
153
ExponentialMovingWindowGroupby,
154
154
RollingGroupby,
155
155
)
@@ -1415,7 +1415,7 @@ def curried(x):
1415
1415
if self.grouper.has_dropped_na and is_transform:
1416
1416
# result will have dropped rows due to nans, fill with null
1417
1417
# and ensure index is ordered same as the input
1418
- result = self._set_result_index_ordered(result)
1418
+ result = self._set_agg_index_ordered(result)
1419
1419
return result
1420
1420
1421
1421
# -----------------------------------------------------------------
@@ -1433,7 +1433,7 @@ def _concat_objects(
1433
1433
if self.group_keys and not is_transform:
1434
1434
if self.as_index:
1435
1435
# possible MI return case
1436
- group_keys = self.grouper.result_index
1436
+ group_keys = self.grouper.agg_index
1437
1437
group_levels = self.grouper.levels
1438
1438
group_names = self.grouper.names
1439
1439
@@ -1490,10 +1490,10 @@ def _concat_objects(
1490
1490
return result
1491
1491
1492
1492
@final
1493
- def _set_result_index_ordered(
1493
+ def _set_agg_index_ordered(
1494
1494
self, result: OutputFrameOrSeries
1495
1495
) -> OutputFrameOrSeries:
1496
- # set the result index on the passed values object and
1496
+ # set the agg index on the passed values object and
1497
1497
# return the new object, xref 8046
1498
1498
1499
1499
obj_axis = self.obj._get_axis(self.axis)
@@ -1586,7 +1586,7 @@ def _wrap_aggregated_output(
1586
1586
index = Index(range(self.grouper.ngroups))
1587
1587
1588
1588
else:
1589
- index = self.grouper.result_index
1589
+ index = self.grouper.agg_index
1590
1590
1591
1591
if qs is not None:
1592
1592
# We get here with len(qs) != 1 and not self.as_index
@@ -1674,7 +1674,7 @@ def _numba_agg_general(
1674
1674
res_mgr = df._mgr.apply(
1675
1675
aggregator, labels=ids, ngroups=ngroups, **aggregator_kwargs
1676
1676
)
1677
- res_mgr.axes[1] = self.grouper.result_index
1677
+ res_mgr.axes[1] = self.grouper.agg_index
1678
1678
result = df._constructor_from_mgr(res_mgr, axes=res_mgr.axes)
1679
1679
1680
1680
if data.ndim == 1:
@@ -1745,7 +1745,7 @@ def _aggregate_with_numba(self, func, *args, engine_kwargs=None, **kwargs):
1745
1745
len(df.columns),
1746
1746
*args,
1747
1747
)
1748
- index = self.grouper.result_index
1748
+ index = self.grouper.agg_index
1749
1749
if data.ndim == 1:
1750
1750
result_kwargs = {"name": data.name}
1751
1751
result = result.ravel()
@@ -2038,7 +2038,7 @@ def _wrap_transform_fast_result(self, result: NDFrameT) -> NDFrameT:
2038
2038
2039
2039
# for each col, reshape to size of original frame by take operation
2040
2040
ids, _, _ = self.grouper.group_info
2041
- result = result.reindex(self.grouper.result_index, axis=self.axis, copy=False)
2041
+ result = result.reindex(self.grouper.agg_index, axis=self.axis, copy=False)
2042
2042
2043
2043
if self.obj.ndim == 1:
2044
2044
# i.e. SeriesGroupBy
@@ -2814,7 +2814,7 @@ def _value_counts(
2814
2814
and not grouping._observed
2815
2815
for grouping in groupings
2816
2816
):
2817
- levels_list = [ping.result_index for ping in groupings]
2817
+ levels_list = [ping.agg_index for ping in groupings]
2818
2818
multi_index, _ = MultiIndex.from_product(
2819
2819
levels_list, names=[ping.name for ping in groupings]
2820
2820
).sortlevel()
@@ -3514,7 +3514,7 @@ def ohlc(self) -> DataFrame:
3514
3514
3515
3515
agg_names = ["open", "high", "low", "close"]
3516
3516
result = self.obj._constructor_expanddim(
3517
- res_values, index=self.grouper.result_index, columns=agg_names
3517
+ res_values, index=self.grouper.agg_index, columns=agg_names
3518
3518
)
3519
3519
return self._reindex_output(result)
3520
3520
@@ -3835,18 +3835,18 @@ def rolling(self, *args, **kwargs) -> RollingGroupby:
3835
3835
@final
3836
3836
@Substitution(name="groupby")
3837
3837
@Appender(_common_see_also)
3838
- def expanding(self, *args, **kwargs) -> ExpandingGroupby:
3838
+ def expanding(self, *args, **kwargs) -> ExpandingGroupBy:
3839
3839
"""
3840
3840
Return an expanding grouper, providing expanding
3841
3841
functionality per group.
3842
3842
3843
3843
Returns
3844
3844
-------
3845
- pandas.api.typing.ExpandingGroupby
3845
+ pandas.api.typing.ExpandingGroupBy
3846
3846
"""
3847
- from pandas.core.window import ExpandingGroupby
3847
+ from pandas.core.window import ExpandingGroupBy
3848
3848
3849
- return ExpandingGroupby(
3849
+ return ExpandingGroupBy(
3850
3850
self._selected_obj,
3851
3851
*args,
3852
3852
_grouper=self.grouper,
@@ -5594,7 +5594,7 @@ def _reindex_output(
5594
5594
output = output.drop(labels=list(g_names), axis=1)
5595
5595
5596
5596
# Set a temp index and reindex (possibly expanding)
5597
- output = output.set_index(self.grouper.result_index).reindex(
5597
+ output = output.set_index(self.grouper.agg_index).reindex(
5598
5598
index, copy=False, fill_value=fill_value
5599
5599
)
5600
5600
@@ -5782,8 +5782,8 @@ def _idxmax_idxmin(
5782
5782
if len(self.grouper.groupings) == 1:
5783
5783
result_len = len(self.grouper.groupings[0].grouping_vector.unique())
5784
5784
else:
5785
- # result_index only contains observed groups in this case
5786
- result_len = len(self.grouper.result_index)
5785
+ # agg_index only contains observed groups in this case
5786
+ result_len = len(self.grouper.agg_index)
5787
5787
assert result_len <= expected_len
5788
5788
has_unobserved = result_len < expected_len
5789
5789
0 commit comments