"""
Provide classes to perform the groupby aggregate operations.
These are not exposed to the user and provide implementations of the grouping
operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
"""
from __future__ import annotations
import collections
import functools
from typing import (
TYPE_CHECKING,
Callable,
Generic,
Hashable,
Iterator,
NoReturn,
Sequence,
final,
)
import numpy as np
from pandas._libs import (
NaT,
lib,
)
import pandas._libs.groupby as libgroupby
import pandas._libs.reduction as libreduction
from pandas._typing import (
ArrayLike,
DtypeObj,
NDFrameT,
Shape,
npt,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
maybe_cast_pointwise_result,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_platform_int,
ensure_uint64,
is_1d_only_ea_dtype,
is_bool_dtype,
is_complex_dtype,
is_datetime64_any_dtype,
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_sparse,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import (
isna,
maybe_fill,
)
from pandas.core.arrays import (
Categorical,
DatetimeArray,
ExtensionArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import FloatingDtype
from pandas.core.arrays.integer import IntegerDtype
from pandas.core.arrays.masked import (
BaseMaskedArray,
BaseMaskedDtype,
)
from pandas.core.arrays.string_ import StringDtype
from pandas.core.frame import DataFrame
from pandas.core.groupby import grouper
from pandas.core.indexes.api import (
CategoricalIndex,
Index,
MultiIndex,
ensure_index,
)
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
get_flattened_list,
get_group_index,
get_group_index_sorter,
get_indexer_dict,
)
if TYPE_CHECKING:
from pandas.core.generic import NDFrame
class WrappedCythonOp:
"""
Dispatch logic for functions defined in _libs.groupby
Parameters
----------
kind: str
Whether the operation is an aggregate or transform.
how: str
Operation name, e.g. "mean".
has_dropped_na: bool
True precisely when dropna=True and the grouper contains a null value.
"""
# Functions for which we do _not_ attempt to cast the cython result
# back to the original dtype.
cast_blocklist = frozenset(["rank", "count", "size", "idxmin", "idxmax"])
def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None:
self.kind = kind
self.how = how
self.has_dropped_na = has_dropped_na
_CYTHON_FUNCTIONS = {
"aggregate": {
"sum": "group_sum",
"prod": "group_prod",
"min": "group_min",
"max": "group_max",
"mean": "group_mean",
"median": "group_median_float64",
"var": "group_var",
"first": "group_nth",
"last": "group_last",
"ohlc": "group_ohlc",
},
"transform": {
"cumprod": "group_cumprod_float64",
"cumsum": "group_cumsum",
"cummin": "group_cummin",
"cummax": "group_cummax",
"rank": "group_rank",
},
}
# "group_any" and "group_all" are also support masks, but don't go
# through WrappedCythonOp
_MASKED_CYTHON_FUNCTIONS = {
"cummin",
"cummax",
"min",
"max",
"last",
"first",
"rank",
"sum",
}
_cython_arity = {"ohlc": 4} # OHLC
# Note: we make this a classmethod and pass kind+how so that caching
# works at the class level and not the instance level
@classmethod
@functools.lru_cache(maxsize=None)
def _get_cython_function(
cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
):
dtype_str = dtype.name
ftype = cls._CYTHON_FUNCTIONS[kind][how]
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(libgroupby, ftype)
if is_numeric:
return f
elif dtype == np.dtype(object):
if how in ["median", "cumprod"]:
# no fused types -> no __signatures__
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{dtype_str}]"
)
elif "object" not in f.__signatures__:
# raise NotImplementedError here rather than TypeError later
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{dtype_str}]"
)
return f
else:
raise NotImplementedError(
"This should not be reached. Please report a bug at "
"github.com/pandas-dev/pandas/",
dtype,
)
def _get_cython_vals(self, values: np.ndarray) -> np.ndarray:
"""
Cast numeric dtypes to float64 for functions that only support that.
Parameters
----------
values : np.ndarray
Returns
-------
values : np.ndarray
"""
how = self.how
if how in ["median", "cumprod"]:
# these two only have float64 implementations
# We should only get here with is_numeric, as non-numeric cases
# should raise in _get_cython_function
values = ensure_float64(values)
elif values.dtype.kind in ["i", "u"]:
if how in ["var", "prod", "mean", "ohlc"] or (
self.kind == "transform" and self.has_dropped_na
):
# result may still include NaN, so we have to cast
values = ensure_float64(values)
elif how == "sum":
# Avoid overflow during group op
if values.dtype.kind == "i":
values = ensure_int64(values)
else:
values = ensure_uint64(values)
return values
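    # For example (a minimal sketch, assuming an aggregate "sum"): int8
    # values are widened to int64 to avoid overflow during the group op:
    #   >>> op = WrappedCythonOp(kind="aggregate", how="sum", has_dropped_na=False)
    #   >>> op._get_cython_vals(np.array([1, 2], dtype=np.int8)).dtype
    #   dtype('int64')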
# TODO: general case implementation overridable by EAs.
def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):
"""
Check if we can do this operation with our cython functions.
Raises
------
NotImplementedError
This is either not a valid function for this dtype, or
valid but not implemented in cython.
"""
how = self.how
if is_numeric:
# never an invalid op for those dtypes, so return early as fastpath
return
if isinstance(dtype, CategoricalDtype):
# NotImplementedError for methods that can fall back to a
# non-cython implementation.
if how in ["sum", "prod", "cumsum", "cumprod"]:
raise TypeError(f"{dtype} type does not support {how} operations")
elif how not in ["rank"]:
# only "rank" is implemented in cython
raise NotImplementedError(f"{dtype} dtype not supported")
elif not dtype.ordered:
# TODO: TypeError?
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_sparse(dtype):
            # sparse arrays are only 1d, so we
            # are not set up for dim transforming
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_datetime64_any_dtype(dtype):
# TODO: same for period_dtype? no for these methods with Period
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
if how in ["sum", "prod", "cumsum", "cumprod"]:
raise TypeError(f"datetime64 type does not support {how} operations")
elif is_timedelta64_dtype(dtype):
if how in ["prod", "cumprod"]:
raise TypeError(f"timedelta64 type does not support {how} operations")
def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
how = self.how
kind = self.kind
arity = self._cython_arity.get(how, 1)
out_shape: Shape
if how == "ohlc":
out_shape = (ngroups, 4)
elif arity > 1:
raise NotImplementedError(
"arity of more than 1 is not supported for the 'how' argument"
)
elif kind == "transform":
out_shape = values.shape
else:
out_shape = (ngroups,) + values.shape[1:]
return out_shape
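    # For example (a sketch): "ohlc" produces 4 columns per group, while a
    # transform keeps the input shape:
    #   >>> vals = np.zeros((1, 5))
    #   >>> WrappedCythonOp("aggregate", "ohlc", False)._get_output_shape(3, vals)
    #   (3, 4)
    #   >>> WrappedCythonOp("transform", "cumsum", False)._get_output_shape(3, vals)
    #   (1, 5)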
def _get_out_dtype(self, dtype: np.dtype) -> np.dtype:
how = self.how
if how == "rank":
out_dtype = "float64"
else:
if is_numeric_dtype(dtype):
out_dtype = f"{dtype.kind}{dtype.itemsize}"
else:
out_dtype = "object"
return np.dtype(out_dtype)
def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : np.dtype
Returns
-------
np.dtype
The desired dtype of the result.
"""
how = self.how
if how in ["sum", "cumsum", "sum", "prod"]:
if dtype == np.dtype(bool):
return np.dtype(np.int64)
elif how in ["mean", "median", "var"]:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
return dtype
elif is_numeric_dtype(dtype):
return np.dtype(np.float64)
return dtype
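    # For example (a sketch): an integer mean comes back as float64, while a
    # boolean sum is widened to int64:
    #   >>> op = WrappedCythonOp("aggregate", "mean", False)
    #   >>> op._get_result_dtype(np.dtype(np.int64))
    #   dtype('float64')
    #   >>> WrappedCythonOp("aggregate", "sum", False)._get_result_dtype(np.dtype(bool))
    #   dtype('int64')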
def uses_mask(self) -> bool:
return self.how in self._MASKED_CYTHON_FUNCTIONS
@final
def _ea_wrap_cython_operation(
self,
values: ExtensionArray,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
**kwargs,
) -> ArrayLike:
"""
If we have an ExtensionArray, unwrap, call _cython_operation, and
re-wrap if appropriate.
"""
if isinstance(values, BaseMaskedArray) and self.uses_mask():
return self._masked_ea_wrap_cython_operation(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
**kwargs,
)
elif isinstance(values, Categorical) and self.uses_mask():
assert self.how == "rank" # the only one implemented ATM
assert values.ordered # checked earlier
mask = values.isna()
npvalues = values._ndarray
res_values = self._cython_op_ndim_compat(
npvalues,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
**kwargs,
)
# If we ever have more than just "rank" here, we'll need to do
# `if self.how in self.cast_blocklist` like we do for other dtypes.
return res_values
npvalues = self._ea_to_cython_values(values)
res_values = self._cython_op_ndim_compat(
npvalues,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=None,
**kwargs,
)
if self.how in self.cast_blocklist:
# i.e. how in ["rank"], since other cast_blocklist methods dont go
# through cython_operation
return res_values
return self._reconstruct_ea_result(values, res_values)
# TODO: general case implementation overridable by EAs.
def _ea_to_cython_values(self, values: ExtensionArray) -> np.ndarray:
# GH#43682
if isinstance(values, (DatetimeArray, PeriodArray, TimedeltaArray)):
# All of the functions implemented here are ordinal, so we can
# operate on the tz-naive equivalents
npvalues = values._ndarray.view("M8[ns]")
elif isinstance(values.dtype, (BooleanDtype, IntegerDtype)):
# IntegerArray or BooleanArray
npvalues = values.to_numpy("float64", na_value=np.nan)
elif isinstance(values.dtype, FloatingDtype):
# FloatingArray
npvalues = values.to_numpy(values.dtype.numpy_dtype, na_value=np.nan)
elif isinstance(values.dtype, StringDtype):
# StringArray
npvalues = values.to_numpy(object, na_value=np.nan)
else:
raise NotImplementedError(
f"function is not implemented for this dtype: {values.dtype}"
)
return npvalues
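    # For example (a sketch): a nullable Int64 array is handed to cython as
    # a plain float64 ndarray, with NaN standing in for pd.NA:
    #   >>> import pandas as pd
    #   >>> arr = pd.array([1, None], dtype="Int64")
    #   >>> WrappedCythonOp("aggregate", "var", False)._ea_to_cython_values(arr)
    #   array([ 1., nan])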
# TODO: general case implementation overridable by EAs.
def _reconstruct_ea_result(
self, values: ExtensionArray, res_values: np.ndarray
) -> ExtensionArray:
"""
Construct an ExtensionArray result from an ndarray result.
"""
dtype: BaseMaskedDtype | StringDtype
if isinstance(values.dtype, StringDtype):
dtype = values.dtype
string_array_cls = dtype.construct_array_type()
return string_array_cls._from_sequence(res_values, dtype=dtype)
elif isinstance(values.dtype, BaseMaskedDtype):
new_dtype = self._get_result_dtype(values.dtype.numpy_dtype)
dtype = BaseMaskedDtype.from_numpy_dtype(new_dtype)
masked_array_cls = dtype.construct_array_type()
return masked_array_cls._from_sequence(res_values, dtype=dtype)
elif isinstance(values, (DatetimeArray, TimedeltaArray, PeriodArray)):
# In to_cython_values we took a view as M8[ns]
assert res_values.dtype == "M8[ns]"
res_values = res_values.view(values._ndarray.dtype)
return values._from_backing_data(res_values)
raise NotImplementedError
@final
def _masked_ea_wrap_cython_operation(
self,
values: BaseMaskedArray,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
**kwargs,
) -> BaseMaskedArray:
"""
Equivalent of `_ea_wrap_cython_operation`, but optimized for masked EA's
and cython algorithms which accept a mask.
"""
orig_values = values
# libgroupby functions are responsible for NOT altering mask
mask = values._mask
if self.kind != "aggregate":
result_mask = mask.copy()
else:
result_mask = np.zeros(ngroups, dtype=bool)
arr = values._data
res_values = self._cython_op_ndim_compat(
arr,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
result_mask=result_mask,
**kwargs,
)
# res_values should already have the correct dtype, we just need to
# wrap in a MaskedArray
return orig_values._maybe_mask_result(res_values, result_mask)
@final
def _cython_op_ndim_compat(
self,
values: np.ndarray,
*,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
mask: npt.NDArray[np.bool_] | None = None,
result_mask: npt.NDArray[np.bool_] | None = None,
**kwargs,
) -> np.ndarray:
if values.ndim == 1:
# expand to 2d, dispatch, then squeeze if appropriate
values2d = values[None, :]
if mask is not None:
mask = mask[None, :]
if result_mask is not None:
result_mask = result_mask[None, :]
res = self._call_cython_op(
values2d,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
result_mask=result_mask,
**kwargs,
)
if res.shape[0] == 1:
return res[0]
# otherwise we have OHLC
return res.T
return self._call_cython_op(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
result_mask=result_mask,
**kwargs,
)
@final
def _call_cython_op(
self,
values: np.ndarray, # np.ndarray[ndim=2]
*,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
mask: npt.NDArray[np.bool_] | None,
result_mask: npt.NDArray[np.bool_] | None,
**kwargs,
) -> np.ndarray: # np.ndarray[ndim=2]
orig_values = values
dtype = values.dtype
is_numeric = is_numeric_dtype(dtype)
is_datetimelike = needs_i8_conversion(dtype)
if is_datetimelike:
values = values.view("int64")
is_numeric = True
elif is_bool_dtype(dtype):
values = values.view("uint8")
if values.dtype == "float16":
values = values.astype(np.float32)
values = values.T
if mask is not None:
mask = mask.T
if result_mask is not None:
result_mask = result_mask.T
out_shape = self._get_output_shape(ngroups, values)
func = self._get_cython_function(self.kind, self.how, values.dtype, is_numeric)
values = self._get_cython_vals(values)
out_dtype = self._get_out_dtype(values.dtype)
result = maybe_fill(np.empty(out_shape, dtype=out_dtype))
if self.kind == "aggregate":
counts = np.zeros(ngroups, dtype=np.int64)
if self.how in ["min", "max", "mean", "last", "first"]:
func(
out=result,
counts=counts,
values=values,
labels=comp_ids,
min_count=min_count,
mask=mask,
result_mask=result_mask,
is_datetimelike=is_datetimelike,
)
elif self.how in ["sum"]:
# We support datetimelike
func(
out=result,
counts=counts,
values=values,
labels=comp_ids,
mask=mask,
result_mask=result_mask,
min_count=min_count,
is_datetimelike=is_datetimelike,
)
else:
func(result, counts, values, comp_ids, min_count)
else:
# TODO: min_count
if self.uses_mask():
if self.how != "rank":
# TODO: should rank take result_mask?
kwargs["result_mask"] = result_mask
func(
out=result,
values=values,
labels=comp_ids,
ngroups=ngroups,
is_datetimelike=is_datetimelike,
mask=mask,
**kwargs,
)
else:
func(
out=result,
values=values,
labels=comp_ids,
ngroups=ngroups,
is_datetimelike=is_datetimelike,
**kwargs,
)
if self.kind == "aggregate":
# i.e. counts is defined. Locations where count<min_count
# need to have the result set to np.nan, which may require casting,
# see GH#40767
if is_integer_dtype(result.dtype) and not is_datetimelike:
# Neutral value for sum is 0, so don't fill empty groups with nan
cutoff = max(0 if self.how == "sum" else 1, min_count)
empty_groups = counts < cutoff
if empty_groups.any():
if result_mask is not None and self.uses_mask():
assert result_mask[empty_groups].all()
else:
# Note: this conversion could be lossy, see GH#40767
result = result.astype("float64")
result[empty_groups] = np.nan
result = result.T
if self.how not in self.cast_blocklist:
# e.g. if we are int64 and need to restore to datetime64/timedelta64
# "rank" is the only member of cast_blocklist we get here
# Casting only needed for float16, bool, datetimelike,
# and self.how in ["sum", "prod", "ohlc", "cumprod"]
res_dtype = self._get_result_dtype(orig_values.dtype)
op_result = maybe_downcast_to_dtype(result, res_dtype)
else:
op_result = result
return op_result
@final
def cython_operation(
self,
*,
values: ArrayLike,
axis: int,
min_count: int = -1,
comp_ids: np.ndarray,
ngroups: int,
**kwargs,
) -> ArrayLike:
"""
Call our cython function, with appropriate pre- and post- processing.
"""
if values.ndim > 2:
raise NotImplementedError("number of dimensions is currently limited to 2")
elif values.ndim == 2:
assert axis == 1, axis
elif not is_1d_only_ea_dtype(values.dtype):
# Note: it is *not* the case that axis is always 0 for 1-dim values,
# as we can have 1D ExtensionArrays that we need to treat as 2D
assert axis == 0
dtype = values.dtype
is_numeric = is_numeric_dtype(dtype)
# can we do this operation with our cython functions
# if not raise NotImplementedError
self._disallow_invalid_ops(dtype, is_numeric)
if not isinstance(values, np.ndarray):
# i.e. ExtensionArray
return self._ea_wrap_cython_operation(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
**kwargs,
)
return self._cython_op_ndim_compat(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=None,
**kwargs,
)
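    # End to end, the flow above looks roughly like this (a sketch against
    # internal APIs, so the exact call is an assumption):
    #   >>> op = WrappedCythonOp(kind="aggregate", how="sum", has_dropped_na=False)
    #   >>> op.cython_operation(
    #   ...     values=np.array([1.0, 2.0, 3.0]),
    #   ...     axis=0,
    #   ...     comp_ids=np.array([0, 0, 1], dtype=np.intp),
    #   ...     ngroups=2,
    #   ... )
    #   array([3., 3.])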
class BaseGrouper:
"""
This is an internal Grouper class, which actually holds
the generated groups
Parameters
----------
axis : Index
groupings : Sequence[Grouping]
        all the grouping instances to handle in this grouper;
        for example, when grouping by a list of keys, pass one Grouping per key
sort : bool, default True
whether this grouper will give sorted result or not
group_keys : bool, default True
mutated : bool, default False
indexer : np.ndarray[np.intp], optional
the indexer created by Grouper
        some groupers (e.g. TimeGrouper) will sort their axis, and their
        group_info is also sorted, so we need the indexer to reorder
"""
axis: Index
def __init__(
self,
axis: Index,
groupings: Sequence[grouper.Grouping],
sort: bool = True,
group_keys: bool = True,
mutated: bool = False,
indexer: npt.NDArray[np.intp] | None = None,
dropna: bool = True,
) -> None:
assert isinstance(axis, Index), axis
self.axis = axis
self._groupings: list[grouper.Grouping] = list(groupings)
self._sort = sort
self.group_keys = group_keys
self.mutated = mutated
self.indexer = indexer
self.dropna = dropna
@property
def groupings(self) -> list[grouper.Grouping]:
return self._groupings
@property
def shape(self) -> Shape:
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self) -> Iterator[Hashable]:
return iter(self.indices)
@property
def nkeys(self) -> int:
return len(self.groupings)
def get_iterator(
self, data: NDFrameT, axis: int = 0
) -> Iterator[tuple[Hashable, NDFrameT]]:
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self.group_keys_seq
yield from zip(keys, splitter)
@final
def _get_splitter(self, data: NDFrame, axis: int = 0) -> DataSplitter:
"""
Returns
-------
Generator yielding subsetted objects
"""
ids, _, ngroups = self.group_info
return get_splitter(data, ids, ngroups, axis=axis)
def _get_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper.
"""
return self.groupings[0].grouping_vector
@final
@cache_readonly
def group_keys_seq(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
return get_flattened_list(ids, ngroups, self.levels, self.codes)
@final
def apply(
self, f: Callable, data: DataFrame | Series, axis: int = 0
) -> tuple[list, bool]:
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self.group_keys_seq
result_values = []
# This calls DataSplitter.__iter__
zipped = zip(group_keys, splitter)
for key, group in zipped:
object.__setattr__(group, "name", key)
# group might be modified
group_axes = group.axes
res = f(group)
if not mutated and not _is_indexed_like(res, group_axes, axis):
mutated = True
result_values.append(res)
# getattr pattern for __name__ is needed for functools.partial objects
if len(group_keys) == 0 and getattr(f, "__name__", None) not in [
"idxmin",
"idxmax",
"nanargmin",
"nanargmax",
]:
# If group_keys is empty, then no function calls have been made,
# so we will not have raised even if this is an invalid dtype.
# So do one dummy call here to raise appropriate TypeError.
f(data.iloc[:0])
return result_values, mutated
@cache_readonly
def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
"""dict {group name -> group indices}"""
if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):
# This shows unused categories in indices GH#38642
return self.groupings[0].indices
codes_list = [ping.codes for ping in self.groupings]
keys = [ping.group_index for ping in self.groupings]
return get_indexer_dict(codes_list, keys)
@final
def result_ilocs(self) -> npt.NDArray[np.intp]:
"""
Get the original integer locations of result_index in the input.
"""
# Original indices are where group_index would go via sorting.
# But when dropna is true, we need to remove null values while accounting for
# any gaps that then occur because of them.
group_index = get_group_index(
self.codes, self.shape, sort=self._sort, xnull=True
)
group_index, _ = compress_group_index(group_index, sort=self._sort)
if self.has_dropped_na:
mask = np.where(group_index >= 0)
# Count how many gaps are caused by previous null values for each position
null_gaps = np.cumsum(group_index == -1)[mask]
group_index = group_index[mask]
result = get_group_index_sorter(group_index, self.ngroups)
if self.has_dropped_na:
# Shift by the number of prior null gaps
result += np.take(null_gaps, result)
return result
@final
@property
def codes(self) -> list[npt.NDArray[np.signedinteger]]:
return [ping.codes for ping in self.groupings]
@property
def levels(self) -> list[Index]:
return [ping.group_index for ping in self.groupings]
@property
def names(self) -> list[Hashable]:
return [ping.name for ping in self.groupings]
@final
def size(self) -> Series:
"""
Compute group sizes.
"""
ids, _, ngroups = self.group_info
out: np.ndarray | list
if ngroups:
out = np.bincount(ids[ids != -1], minlength=ngroups)
else:
out = []
return Series(out, index=self.result_index, dtype="int64")
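    # For example (continuing the sketch above):
    #   >>> gb.grouper.size()
    #   a
    #   1    2
    #   2    1
    #   dtype: int64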
@cache_readonly
def groups(self) -> dict[Hashable, np.ndarray]:
"""dict {group name -> group labels}"""
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = zip(*(ping.grouping_vector for ping in self.groupings))
index = Index(to_groupby)
return self.axis.groupby(index)
@final
@cache_readonly
def is_monotonic(self) -> bool:
        # whether my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic_increasing
@final
@cache_readonly
def has_dropped_na(self) -> bool:
"""
Whether grouper has null value(s) that are dropped.
"""
return bool((self.group_info[0] < 0).any())
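    # For example (a sketch): with the default dropna=True, a null group key
    # leaves -1 codes behind, so has_dropped_na is True:
    #   >>> df2 = pd.DataFrame({"a": [1.0, None], "b": [1, 2]})
    #   >>> df2.groupby("a").grouper.has_dropped_na
    #   True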
@cache_readonly
def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
comp_ids, obs_group_ids = self._get_compressed_codes()
ngroups = len(obs_group_ids)
comp_ids = ensure_platform_int(comp_ids)
return comp_ids, obs_group_ids, ngroups
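    # Continuing the sketch: for the df above, the triple is roughly
    #   >>> gb.grouper.group_info
    #   (array([0, 0, 1]), array([0, 1]), 2)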
@final
@cache_readonly
def codes_info(self) -> npt.NDArray[np.intp]:
# return the codes of items in original grouped axis
ids, _, _ = self.group_info
if self.indexer is not None:
sorter = np.lexsort((ids, self.indexer))
ids = ids[sorter]
ids = ensure_platform_int(ids)
# TODO: if numpy annotates np.lexsort, this ensure_platform_int
# may become unnecessary
return ids
@final
def _get_compressed_codes(
self,
) -> tuple[npt.NDArray[np.signedinteger], npt.NDArray[np.intp]]:
# The first returned ndarray may have any signed integer dtype
if len(self.groupings) > 1:
group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True)
return compress_group_index(group_index, sort=self._sort)
# FIXME: compress_group_index's second return value is int64, not intp
ping = self.groupings[0]
return ping.codes, np.arange(len(ping.group_index), dtype=np.intp)
@final
@cache_readonly
def ngroups(self) -> int:
return len(self.result_index)
@property
def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]:
codes = self.codes
ids, obs_ids, _ = self.group_info
return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True)
@cache_readonly
def result_index(self) -> Index:
if len(self.groupings) == 1:
return self.groupings[0].result_index.rename(self.names[0])
codes = self.reconstructed_codes
levels = [ping.result_index for ping in self.groupings]
return MultiIndex(
levels=levels, codes=codes, verify_integrity=False, names=self.names
)
@final
def get_group_levels(self) -> list[ArrayLike]:
# Note: only called from _insert_inaxis_grouper_inplace, which
# is only called for BaseGrouper, never for BinGrouper
if len(self.groupings) == 1:
return [self.groupings[0].group_arraylike]
name_list = []
for ping, codes in zip(self.groupings, self.reconstructed_codes):
codes = ensure_platform_int(codes)
levels = ping.group_arraylike.take(codes)
name_list.append(levels)
return name_list