"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from __future__ import annotations
import copy
from datetime import timedelta
from functools import partial
import inspect
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
Sized,
)
import warnings
import numpy as np
from pandas._libs.tslibs import (
BaseOffset,
to_offset,
)
import pandas._libs.window.aggregations as window_aggregations
from pandas._typing import (
ArrayLike,
Axis,
NDFrameT,
WindowingRankType,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_integer,
is_list_like,
is_scalar,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.dtypes.missing import notna
from pandas.core._numba import executor
from pandas.core.algorithms import factorize
from pandas.core.apply import ResamplerWindowApply
from pandas.core.arrays import ExtensionArray
from pandas.core.base import (
DataError,
SelectionMixin,
)
import pandas.core.common as com
from pandas.core.indexers.objects import (
BaseIndexer,
FixedWindowIndexer,
GroupbyIndexer,
VariableWindowIndexer,
)
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
MultiIndex,
PeriodIndex,
TimedeltaIndex,
)
from pandas.core.reshape.concat import concat
from pandas.core.util.numba_ import (
get_jit_arguments,
maybe_use_numba,
)
from pandas.core.window.common import (
flex_binary_moment,
zsqrt,
)
from pandas.core.window.doc import (
_shared_docs,
args_compat,
create_section_header,
kwargs_compat,
kwargs_scipy,
numba_notes,
template_header,
template_returns,
template_see_also,
window_agg_numba_parameters,
window_apply_parameters,
)
from pandas.core.window.numba_ import (
generate_manual_numpy_nan_agg_with_axis,
generate_numba_apply_func,
generate_numba_table_func,
)
if TYPE_CHECKING:
from pandas import (
DataFrame,
Series,
)
from pandas.core.generic import NDFrame
from pandas.core.groupby.ops import BaseGrouper
from pandas.core.internals import Block # noqa:F401


class BaseWindow(SelectionMixin):
"""Provides utilities for performing windowing operations."""
_attributes: list[str] = []
exclusions: frozenset[Hashable] = frozenset()
_on: Index
def __init__(
self,
obj: NDFrame,
window=None,
min_periods: int | None = None,
center: bool = False,
win_type: str | None = None,
axis: Axis = 0,
on: str | Index | None = None,
closed: str | None = None,
step: int | None = None,
method: str = "single",
*,
selection=None,
) -> None:
self.obj = obj
self.on = on
self.closed = closed
self.step = step
self.window = window
self.min_periods = min_periods
self.center = center
# TODO(2.0): Change this back to self.win_type once deprecation is enforced
self._win_type = win_type
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.method = method
self._win_freq_i8 = None
if self.on is None:
if self.axis == 0:
self._on = self.obj.index
else:
# i.e. self.axis == 1
self._on = self.obj.columns
elif isinstance(self.on, Index):
self._on = self.on
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
self._on = Index(self.obj[self.on])
else:
raise ValueError(
f"invalid on specified as {self.on}, "
"must be a column (of DataFrame), an Index or None"
)
self._selection = selection
self._validate()
@property
def win_type(self):
if self._win_freq_i8 is not None:
warnings.warn(
"win_type will no longer return 'freq' in a future version. "
"Check the type of self.window instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return "freq"
return self._win_type
@property
def is_datetimelike(self) -> bool:
warnings.warn(
"is_datetimelike is deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self._win_freq_i8 is not None
def validate(self) -> None:
warnings.warn(
"validate is deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self._validate()
def _validate(self) -> None:
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None:
if not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
elif self.min_periods < 0:
raise ValueError("min_periods must be >= 0")
elif is_integer(self.window) and self.min_periods > self.window:
raise ValueError(
f"min_periods {self.min_periods} must be <= window {self.window}"
)
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or 'neither'")
if not isinstance(self.obj, (ABCSeries, ABCDataFrame)):
            raise TypeError(f"invalid type: {type(self.obj)}")
if isinstance(self.window, BaseIndexer):
# Validate that the passed BaseIndexer subclass has
# a get_window_bounds with the correct signature.
get_window_bounds_signature = inspect.signature(
self.window.get_window_bounds
).parameters.keys()
expected_signature = inspect.signature(
BaseIndexer().get_window_bounds
).parameters.keys()
if get_window_bounds_signature != expected_signature:
raise ValueError(
f"{type(self.window).__name__} does not implement "
f"the correct signature for get_window_bounds"
)
if self.method not in ["table", "single"]:
raise ValueError("method must be 'table' or 'single")
if self.step is not None:
if not is_integer(self.step):
raise ValueError("step must be an integer")
elif self.step < 0:
raise ValueError("step must be >= 0")
def _check_window_bounds(
self, start: np.ndarray, end: np.ndarray, num_vals: int
) -> None:
if len(start) != len(end):
raise ValueError(
f"start ({len(start)}) and end ({len(end)}) bounds must be the "
f"same length"
)
elif len(start) != (num_vals + (self.step or 1) - 1) // (self.step or 1):
raise ValueError(
f"start and end bounds ({len(start)}) must be the same length "
f"as the object ({num_vals}) divided by the step ({self.step}) "
f"if given and rounded up"
)
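    # Worked example (editor's illustration) of the check above: with
    # num_vals=10 and step=3 the expected bound length is
    # (10 + 3 - 1) // 3 == 4, i.e. ceil(10 / 3) -- one start/end pair per
    # evaluated window, at positions 0, 3, 6 and 9.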
def _slice_axis_for_step(self, index: Index, result: Sized | None = None) -> Index:
"""
Slices the index for a given result and the preset step.
"""
return (
index
if result is None or len(result) == len(index)
else index[:: self.step]
)
def _create_data(self, obj: NDFrameT) -> NDFrameT:
"""
Split data into blocks & return conformed data.
"""
# filter out the on from the object
if self.on is not None and not isinstance(self.on, Index) and obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
if self.axis == 1:
# GH: 20649 in case of mixed dtype and axis=1 we have to convert everything
# to float to calculate the complete row at once. We exclude all non-numeric
# dtypes.
obj = obj.select_dtypes(include=["number"], exclude=["timedelta"])
obj = obj.astype("float64", copy=False)
obj._mgr = obj._mgr.consolidate()
return obj
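    # Editor's illustration of the axis=1 branch (GH 20649), assuming a
    # mixed-dtype frame: non-numeric columns are dropped and the remainder
    # is upcast to float64, so
    #
    #     pd.DataFrame({"a": [1], "b": ["x"]}).rolling(1, axis=1).sum()
    #
    # operates on the float64 column "a" only.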
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
# we need to make a shallow copy of ourselves
# with the same groupby
with warnings.catch_warnings():
# TODO(2.0): Remove once win_type deprecation is enforced
warnings.filterwarnings("ignore", "win_type", FutureWarning)
kwargs = {attr: getattr(self, attr) for attr in self._attributes}
selection = None
if subset.ndim == 2 and (
(is_scalar(key) and key in subset) or is_list_like(key)
):
selection = key
new_win = type(self)(subset, selection=selection, **kwargs)
return new_win
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
def _dir_additions(self):
return self.obj._dir_additions()
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs_list = (
f"{attr_name}={getattr(self, attr_name)}"
for attr_name in self._attributes
if getattr(self, attr_name, None) is not None and attr_name[0] != "_"
)
attrs = ",".join(attrs_list)
return f"{type(self).__name__} [{attrs}]"
def __iter__(self):
obj = self._selected_obj.set_axis(self._on)
obj = self._create_data(obj)
indexer = self._get_window_indexer()
start, end = indexer.get_window_bounds(
num_values=len(obj),
min_periods=self.min_periods,
center=self.center,
closed=self.closed,
step=self.step,
)
self._check_window_bounds(start, end, len(obj))
for s, e in zip(start, end):
result = obj.iloc[slice(s, e)]
yield result
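    # Usage sketch (editor's illustration of the loop above): iterating a
    # rolling object yields one windowed sub-object per position,
    # including the short ramp-up windows:
    #
    #     >>> s = pd.Series([1, 2, 3])
    #     >>> [w.tolist() for w in s.rolling(2)]
    #     [[1], [1, 2], [2, 3]]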
def _prep_values(self, values: ArrayLike) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if needs_i8_conversion(values.dtype):
raise NotImplementedError(
f"ops for {type(self).__name__} for this "
f"dtype {values.dtype} are not implemented"
)
else:
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
try:
if isinstance(values, ExtensionArray):
values = values.to_numpy(np.float64, na_value=np.nan)
else:
values = ensure_float64(values)
except (ValueError, TypeError) as err:
raise TypeError(f"cannot handle this type -> {values.dtype}") from err
# Convert inf to nan for C funcs
inf = np.isinf(values)
if inf.any():
values = np.where(inf, np.nan, values)
return values
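    # Editor's note (illustrative): a float32 input such as
    # np.array([1.0, np.inf], dtype=np.float32) therefore reaches the
    # Cython aggregations as float64 [1.0, nan], so +/-inf never
    # propagates into window results.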
def _insert_on_column(self, result: DataFrame, obj: DataFrame) -> None:
# if we have an 'on' column we want to put it back into
# the results in the same location
from pandas import Series
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
extra_col = Series(self._on, index=self.obj.index, name=name)
if name in result.columns:
# TODO: sure we want to overwrite results?
result[name] = extra_col
elif name in result.index.names:
pass
elif name in self._selected_obj.columns:
# insert in the same location as we had in _selected_obj
old_cols = self._selected_obj.columns
new_cols = result.columns
old_loc = old_cols.get_loc(name)
overlap = new_cols.intersection(old_cols[:old_loc])
new_loc = len(overlap)
result.insert(new_loc, name, extra_col)
else:
# insert at the end
result[name] = extra_col
@property
def _index_array(self):
# TODO: why do we get here with e.g. MultiIndex?
if needs_i8_conversion(self._on.dtype):
return self._on.asi8
return None
def _resolve_output(self, out: DataFrame, obj: DataFrame) -> DataFrame:
"""Validate and finalize result."""
if out.shape[1] == 0 and obj.shape[1] > 0:
raise DataError("No numeric types to aggregate")
elif out.shape[1] == 0:
return obj.astype("float64")
self._insert_on_column(out, obj)
return out
def _get_window_indexer(self) -> BaseIndexer:
"""
Return an indexer class that will compute the window start and end bounds
"""
if isinstance(self.window, BaseIndexer):
return self.window
if self._win_freq_i8 is not None:
return VariableWindowIndexer(
index_array=self._index_array,
window_size=self._win_freq_i8,
center=self.center,
)
return FixedWindowIndexer(window_size=self.window)
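    # Dispatch sketch (editor's summary of the branches above):
    #
    #     df.rolling(my_indexer)  ->  my_indexer (a BaseIndexer, used as-is)
    #     df.rolling("2s")        ->  VariableWindowIndexer (freq window)
    #     df.rolling(3)           ->  FixedWindowIndexer(window_size=3)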
def _apply_series(
self, homogeneous_func: Callable[..., ArrayLike], name: str | None = None
) -> Series:
"""
Series version of _apply_blockwise
"""
obj = self._create_data(self._selected_obj)
if name == "count":
# GH 12541: Special case for count where we support date-like types
obj = notna(obj).astype(int)
try:
values = self._prep_values(obj._values)
except (TypeError, NotImplementedError) as err:
raise DataError("No numeric types to aggregate") from err
result = homogeneous_func(values)
index = self._slice_axis_for_step(obj.index, result)
return obj._constructor(result, index=index, name=obj.name)
def _apply_blockwise(
self, homogeneous_func: Callable[..., ArrayLike], name: str | None = None
) -> DataFrame | Series:
"""
Apply the given function to the DataFrame broken down into homogeneous
sub-frames.
"""
if self._selected_obj.ndim == 1:
return self._apply_series(homogeneous_func, name)
obj = self._create_data(self._selected_obj)
if name == "count":
# GH 12541: Special case for count where we support date-like types
obj = notna(obj).astype(int)
obj._mgr = obj._mgr.consolidate()
def hfunc(values: ArrayLike) -> ArrayLike:
values = self._prep_values(values)
return homogeneous_func(values)
if self.axis == 1:
obj = obj.T
taker = []
res_values = []
for i, arr in enumerate(obj._iter_column_arrays()):
# GH#42736 operate column-wise instead of block-wise
try:
res = hfunc(arr)
except (TypeError, NotImplementedError):
pass
else:
res_values.append(res)
taker.append(i)
index = self._slice_axis_for_step(
obj.index, res_values[0] if len(res_values) > 0 else None
)
df = type(obj)._from_arrays(
res_values,
index=index,
columns=obj.columns.take(taker),
verify_integrity=False,
)
if self.axis == 1:
df = df.T
if 0 != len(res_values) != len(obj.columns):
# GH#42738 ignore_failures dropped nuisance columns
dropped = obj.columns.difference(obj.columns.take(taker))
warnings.warn(
"Dropping of nuisance columns in rolling operations "
"is deprecated; in a future version this will raise TypeError. "
"Select only valid columns before calling the operation. "
f"Dropped columns were {dropped}",
FutureWarning,
stacklevel=find_stack_level(),
)
return self._resolve_output(df, obj)
def _apply_tablewise(
self, homogeneous_func: Callable[..., ArrayLike], name: str | None = None
) -> DataFrame | Series:
"""
Apply the given function to the DataFrame across the entire object
"""
if self._selected_obj.ndim == 1:
raise ValueError("method='table' not applicable for Series objects.")
obj = self._create_data(self._selected_obj)
values = self._prep_values(obj.to_numpy())
values = values.T if self.axis == 1 else values
result = homogeneous_func(values)
result = result.T if self.axis == 1 else result
index = self._slice_axis_for_step(obj.index, result)
columns = (
obj.columns
if result.shape[1] == len(obj.columns)
else obj.columns[:: self.step]
)
out = obj._constructor(result, index=index, columns=columns)
return self._resolve_output(out, obj)
def _apply_pairwise(
self,
target: DataFrame | Series,
other: DataFrame | Series | None,
pairwise: bool | None,
func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series],
) -> DataFrame | Series:
"""
Apply the given pairwise function given 2 pandas objects (DataFrame/Series)
"""
if other is None:
other = target
# only default unset
pairwise = True if pairwise is None else pairwise
elif not isinstance(other, (ABCDataFrame, ABCSeries)):
raise ValueError("other must be a DataFrame or Series")
return flex_binary_moment(target, other, func, pairwise=bool(pairwise))
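    # Usage sketch (editor's illustration): with ``other=None`` the
    # pairwise default applies, so ``df.rolling(2).corr()`` correlates
    # every pair of columns of ``df``, while
    # ``df.rolling(2).corr(some_series)`` matches each column against
    # ``some_series`` only.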
def _apply(
self,
func: Callable[..., Any],
name: str | None = None,
numba_args: tuple[Any, ...] = (),
**kwargs,
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
        func : callable
            Function to apply.
        name : str, optional
        numba_args : tuple
            Args to be passed when func is a numba func.
**kwargs
additional arguments for rolling function and window function
Returns
-------
y : type of input
"""
window_indexer = self._get_window_indexer()
min_periods = (
self.min_periods
if self.min_periods is not None
else window_indexer.window_size
)
def homogeneous_func(values: np.ndarray):
# calculation function
if values.size == 0:
return values.copy()
def calc(x):
start, end = window_indexer.get_window_bounds(
num_values=len(x),
min_periods=min_periods,
center=self.center,
closed=self.closed,
step=self.step,
)
self._check_window_bounds(start, end, len(x))
return func(x, start, end, min_periods, *numba_args)
with np.errstate(all="ignore"):
result = calc(values)
return result
if self.method == "single":
return self._apply_blockwise(homogeneous_func, name)
else:
return self._apply_tablewise(homogeneous_func, name)
def _numba_apply(
self,
func: Callable[..., Any],
engine_kwargs: dict[str, bool] | None = None,
*func_args,
):
window_indexer = self._get_window_indexer()
min_periods = (
self.min_periods
if self.min_periods is not None
else window_indexer.window_size
)
obj = self._create_data(self._selected_obj)
if self.axis == 1:
obj = obj.T
values = self._prep_values(obj.to_numpy())
if values.ndim == 1:
values = values.reshape(-1, 1)
start, end = window_indexer.get_window_bounds(
num_values=len(values),
min_periods=min_periods,
center=self.center,
closed=self.closed,
step=self.step,
)
self._check_window_bounds(start, end, len(values))
aggregator = executor.generate_shared_aggregator(
func, **get_jit_arguments(engine_kwargs)
)
result = aggregator(values, start, end, min_periods, *func_args)
result = result.T if self.axis == 1 else result
index = self._slice_axis_for_step(obj.index, result)
if obj.ndim == 1:
result = result.squeeze()
out = obj._constructor(result, index=index, name=obj.name)
return out
else:
columns = self._slice_axis_for_step(obj.columns, result.T)
out = obj._constructor(result, index=index, columns=columns)
return self._resolve_output(out, obj)
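    # Usage sketch (editor's illustration, assuming numba is installed):
    #
    #     df.rolling(3).mean(engine="numba",
    #                        engine_kwargs={"parallel": False})
    #
    # routes through this method, JIT-compiling one shared aggregator and
    # reusing it across columns.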
def aggregate(self, func, *args, **kwargs):
result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
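    # Usage sketch (editor's illustration): string/list/dict inputs are
    # handled by ResamplerWindowApply; a lone callable falls back to
    # ``apply``:
    #
    #     df.rolling(2).aggregate(["sum", "mean"])    # MultiIndex columns
    #     df.rolling(2).aggregate(lambda x: x.max())  # apply() fallback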


class BaseWindowGroupby(BaseWindow):
"""
Provide the groupby windowing facilities.
"""
_grouper: BaseGrouper
_as_index: bool
_attributes: list[str] = ["_grouper"]
def __init__(
self,
obj: DataFrame | Series,
*args,
_grouper: BaseGrouper,
_as_index: bool = True,
**kwargs,
) -> None:
from pandas.core.groupby.ops import BaseGrouper
if not isinstance(_grouper, BaseGrouper):
raise ValueError("Must pass a BaseGrouper object.")
self._grouper = _grouper
self._as_index = _as_index
# GH 32262: It's convention to keep the grouping column in
# groupby.<agg_func>, but unexpected to users in
# groupby.rolling.<agg_func>
obj = obj.drop(columns=self._grouper.names, errors="ignore")
# GH 15354
if kwargs.get("step") is not None:
raise NotImplementedError("step not implemented for groupby")
super().__init__(obj, *args, **kwargs)
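    # Editor's illustration of the GH 32262 convention above: the grouping
    # column is dropped from the windowed output, e.g.
    #
    #     >>> df = pd.DataFrame({"g": ["a", "a"], "v": [1.0, 2.0]})
    #     >>> df.groupby("g").rolling(2).sum().columns
    #     Index(['v'], dtype='object')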
def _apply(
self,
func: Callable[..., Any],
name: str | None = None,
numba_args: tuple[Any, ...] = (),
**kwargs,
) -> DataFrame | Series:
result = super()._apply(
func,
name,
numba_args,
**kwargs,
)
# Reconstruct the resulting MultiIndex
# 1st set of levels = group by labels
# 2nd set of levels = original DataFrame/Series index
grouped_object_index = self.obj.index
grouped_index_name = [*grouped_object_index.names]
groupby_keys = copy.copy(self._grouper.names)
result_index_names = groupby_keys + grouped_index_name
drop_columns = [
key
for key in self._grouper.names
if key not in self.obj.index.names or key is None
]
if len(drop_columns) != len(groupby_keys):
# Our result will have still kept the column in the result
result = result.drop(columns=drop_columns, errors="ignore")
codes = self._grouper.codes
levels = copy.copy(self._grouper.levels)
group_indices = self._grouper.indices.values()
if group_indices:
indexer = np.concatenate(list(group_indices))
else:
indexer = np.array([], dtype=np.intp)
codes = [c.take(indexer) for c in codes]
# if the index of the original dataframe needs to be preserved, append
# this index (but reordered) to the codes/levels from the groupby
if grouped_object_index is not None:
idx = grouped_object_index.take(indexer)
if not isinstance(idx, MultiIndex):
idx = MultiIndex.from_arrays([idx])
codes.extend(list(idx.codes))
levels.extend(list(idx.levels))
result_index = MultiIndex(
levels, codes, names=result_index_names, verify_integrity=False
)
result.index = result_index
if not self._as_index:
result = result.reset_index(level=list(range(len(groupby_keys))))
return result
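    # Illustrative result shape (editor's sketch of the reconstruction
    # above): for
    #
    #     df = pd.DataFrame({"g": ["a", "a", "b"], "v": [1, 2, 3]})
    #     df.groupby("g").rolling(2).sum()
    #
    # the index is a MultiIndex with the group label first and the
    # original row label second: ("a", 0), ("a", 1), ("b", 2).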
def _apply_pairwise(
self,
target: DataFrame | Series,
other: DataFrame | Series | None,
pairwise: bool | None,
func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series],
) -> DataFrame | Series:
"""
Apply the given pairwise function given 2 pandas objects (DataFrame/Series)
"""
# Manually drop the grouping column first
target = target.drop(columns=self._grouper.names, errors="ignore")
target = self._create_data(target)
result = super()._apply_pairwise(target, other, pairwise, func)
# 1) Determine the levels + codes of the groupby levels
if other is not None and not all(
len(group) == len(other) for group in self._grouper.indices.values()
):
# GH 42915
# len(other) != len(any group), so must reindex (expand) the result
# from flex_binary_moment to a "transform"-like result
# per groupby combination
old_result_len = len(result)
result = concat(
[
result.take(gb_indices).reindex(result.index)
for gb_indices in self._grouper.indices.values()
]
)
gb_pairs = (
com.maybe_make_list(pair) for pair in self._grouper.indices.keys()
)
groupby_codes = []
groupby_levels = []
# e.g. [[1, 2], [4, 5]] as [[1, 4], [2, 5]]
for gb_level_pair in map(list, zip(*gb_pairs)):
labels = np.repeat(np.array(gb_level_pair), old_result_len)
codes, levels = factorize(labels)
groupby_codes.append(codes)
groupby_levels.append(levels)
else:
# pairwise=True or len(other) == len(each group), so repeat
# the groupby labels by the number of columns in the original object
groupby_codes = self._grouper.codes
# error: Incompatible types in assignment (expression has type
# "List[Index]", variable has type "List[Union[ndarray, Index]]")
groupby_levels = self._grouper.levels # type: ignore[assignment]
group_indices = self._grouper.indices.values()
if group_indices:
indexer = np.concatenate(list(group_indices))
else:
indexer = np.array([], dtype=np.intp)
if target.ndim == 1:
repeat_by = 1
else:
repeat_by = len(target.columns)
groupby_codes = [
np.repeat(c.take(indexer), repeat_by) for c in groupby_codes
]
# 2) Determine the levels + codes of the result from super()._apply_pairwise
if isinstance(result.index, MultiIndex):
result_codes = list(result.index.codes)
result_levels = list(result.index.levels)
result_names = list(result.index.names)
else:
idx_codes, idx_levels = factorize(result.index)
result_codes = [idx_codes]
result_levels = [idx_levels]
result_names = [result.index.name]
# 3) Create the resulting index by combining 1) + 2)
result_codes = groupby_codes + result_codes
result_levels = groupby_levels + result_levels
result_names = self._grouper.names + result_names
result_index = MultiIndex(
result_levels, result_codes, names=result_names, verify_integrity=False
)
result.index = result_index
return result
def _create_data(self, obj: NDFrameT) -> NDFrameT:
"""
Split data into blocks & return conformed data.
"""
# Ensure the object we're rolling over is monotonically sorted relative
# to the groups
# GH 36197
if not obj.empty:
groupby_order = np.concatenate(list(self._grouper.indices.values())).astype(
np.int64
)
obj = obj.take(groupby_order)
return super()._create_data(obj)
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried through to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
# GH 43355
subset = self.obj.set_index(self._on)
return super()._gotitem(key, ndim, subset=subset)


class Window(BaseWindow):
"""
Provide rolling window calculations.
Parameters
----------
window : int, offset, or BaseIndexer subclass
Size of the moving window.
If an integer, the fixed number of observations used for
each window.
        If an offset, the time period of each window. Each
        window will be variable sized based on the observations included in
        the time-period. This is only valid for datetimelike indexes.
To learn more about the offsets & frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
If a BaseIndexer subclass, the window boundaries
based on the defined ``get_window_bounds`` method. Additional rolling
keyword arguments, namely ``min_periods``, ``center``, ``closed`` and
``step`` will be passed to ``get_window_bounds``.
min_periods : int, default None
Minimum number of observations in window required to have a value;
otherwise, result is ``np.nan``.
For a window that is specified by an offset, ``min_periods`` will default to 1.
For a window that is specified by an integer, ``min_periods`` will default
to the size of the window.
center : bool, default False
If False, set the window labels as the right edge of the window index.
If True, set the window labels as the center of the window index.
win_type : str, default None
If ``None``, all points are evenly weighted.
If a string, it must be a valid `scipy.signal window function
<https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__.
Certain Scipy window types require additional parameters to be passed
in the aggregation function. The additional parameters must match
the keywords specified in the Scipy window type method signature.
on : str, optional
For a DataFrame, a column label or Index level on which
to calculate the rolling window, rather than the DataFrame's index.
        The provided integer column is ignored and excluded from the result,
        since an integer index is not used to calculate the rolling window.
axis : int or str, default 0
If ``0`` or ``'index'``, roll across the rows.
If ``1`` or ``'columns'``, roll across the columns.
closed : str, default None
If ``'right'``, the first point in the window is excluded from calculations.
If ``'left'``, the last point in the window is excluded from calculations.
        If ``'both'``, no points in the window are excluded from calculations.
If ``'neither'``, the first and last points in the window are excluded
from calculations.
Default ``None`` (``'right'``).
.. versionchanged:: 1.2.0
The closed parameter with fixed windows is now supported.
step : int, default None

        .. versionadded:: 1.5.0

Evaluate the window at every ``step`` result, equivalent to slicing as
``[::step]``. ``window`` must be an integer. Using a step argument other
than None or 1 will produce a result with a different shape than the input.
method : str {'single', 'table'}, default 'single'
.. versionadded:: 1.3.0
Execute the rolling operation per single column or row (``'single'``)
or over the entire object (``'table'``).
This argument is only implemented when specifying ``engine='numba'``
in the method call.
Returns
-------
``Window`` subclass if a ``win_type`` is passed
``Rolling`` subclass if ``win_type`` is not passed
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
See :ref:`Windowing Operations <window.generic>` for further usage details
and examples.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
**window**
Rolling sum with a window length of 2 observations.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Rolling sum with a window span of 2 seconds.