
Commit d6e2586: TYP: resample (#41126)
Parent: 244ae51

3 files changed, 54 insertions(+), 22 deletions(-)

pandas/core/resample.py (+39, -11)
@@ -4,6 +4,7 @@
 from datetime import timedelta
 from textwrap import dedent
 from typing import (
+    TYPE_CHECKING,
     Callable,
     no_type_check,
 )
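The TYPE_CHECKING constant imported here, and the Literal import it guards further down in this file, keep typing-only names out of the runtime import graph. A minimal sketch of the guard pattern, using an invented function name rather than anything from this commit:

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # evaluated only by the type checker, never at runtime
    from typing import Literal


def pick_side(closed: Literal["left", "right"]) -> str:
    # mypy rejects pick_side("middle"); at runtime the argument is an ordinary str
    return closed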
@@ -12,6 +13,7 @@

 from pandas._libs import lib
 from pandas._libs.tslibs import (
+    BaseOffset,
     IncompatibleFrequency,
     NaT,
     Period,
@@ -84,6 +86,9 @@
     Tick,
 )

+if TYPE_CHECKING:
+    from typing import Literal
+
 _shared_docs_kwargs: dict[str, str] = {}

@@ -489,11 +494,11 @@ def _apply_loffset(self, result):
         self.loffset = None
         return result

-    def _get_resampler_for_grouping(self, groupby, **kwargs):
+    def _get_resampler_for_grouping(self, groupby):
         """
         Return the correct class for resampling with groupby.
         """
-        return self._resampler_for_grouping(self, groupby=groupby, **kwargs)
+        return self._resampler_for_grouping(self, groupby=groupby)

     def _wrap_result(self, result):
         """
@@ -1039,9 +1044,10 @@ class _GroupByMixin(PandasObject):
     Provide the groupby facilities.
     """

-    _attributes: list[str]
+    _attributes: list[str]  # in practice the same as Resampler._attributes

-    def __init__(self, obj, *args, **kwargs):
+    def __init__(self, obj, **kwargs):
+        # reached via ._gotitem and _get_resampler_for_grouping

         parent = kwargs.pop("parent", None)
         groupby = kwargs.pop("groupby", None)
@@ -1450,7 +1456,7 @@ class TimeGrouper(Grouper):
     def __init__(
         self,
         freq="Min",
-        closed: str | None = None,
+        closed: Literal["left", "right"] | None = None,
         label: str | None = None,
         how="mean",
         axis=0,
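Narrowing closed from str to Literal["left", "right"] | None documents the only values TimeGrouper accepts and lets a checker catch typos before runtime. A short usage sketch of the public parameter this annotation describes, with invented data:

import pandas as pd

s = pd.Series(range(6), index=pd.date_range("2021-01-01", periods=6, freq="30min"))

# closed picks which bin edge is inclusive; only "left" and "right" are valid
print(s.resample("H", closed="left").sum())
print(s.resample("H", closed="right").sum())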
@@ -1822,8 +1828,13 @@ def _take_new_index(


 def _get_timestamp_range_edges(
-    first, last, freq, closed="left", origin="start_day", offset=None
-):
+    first: Timestamp,
+    last: Timestamp,
+    freq: BaseOffset,
+    closed: Literal["right", "left"] = "left",
+    origin="start_day",
+    offset: Timedelta | None = None,
+) -> tuple[Timestamp, Timestamp]:
     """
     Adjust the `first` Timestamp to the preceding Timestamp that resides on
     the provided offset. Adjust the `last` Timestamp to the following
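These edge helpers receive the closed, origin, and offset values passed to resample(), which is where the newly annotated types come from in practice. A rough illustration with invented data:

import pandas as pd

idx = pd.date_range("2021-01-01 00:13", periods=10, freq="17min")
s = pd.Series(range(10), index=idx)

# origin/offset shift where the first bin edge is anchored;
# closed decides whether that edge belongs to the bin on its left or right
print(s.resample("1H", closed="right", origin="start_day", offset="15min").sum())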
@@ -1895,8 +1906,13 @@ def _get_timestamp_range_edges(


 def _get_period_range_edges(
-    first, last, freq, closed="left", origin="start_day", offset=None
-):
+    first: Period,
+    last: Period,
+    freq: BaseOffset,
+    closed: Literal["right", "left"] = "left",
+    origin="start_day",
+    offset: Timedelta | None = None,
+) -> tuple[Period, Period]:
     """
     Adjust the provided `first` and `last` Periods to the respective Period of
     the given offset that encompasses them.
@@ -1959,7 +1975,12 @@ def _insert_nat_bin(


 def _adjust_dates_anchored(
-    first, last, freq, closed="right", origin="start_day", offset=None
+    first,
+    last,
+    freq,
+    closed: Literal["right", "left"] = "right",
+    origin="start_day",
+    offset: Timedelta | None = None,
 ):
     # First and last offsets should be calculated from the start day to fix an
     # error cause by resampling across multiple days when a one day period is
@@ -2029,7 +2050,14 @@ def _adjust_dates_anchored(
     return fresult, lresult


-def asfreq(obj, freq, method=None, how=None, normalize=False, fill_value=None):
+def asfreq(
+    obj: FrameOrSeries,
+    freq,
+    method=None,
+    how=None,
+    normalize: bool = False,
+    fill_value=None,
+) -> FrameOrSeries:
     """
     Utility frequency conversion method for Series/DataFrame.

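FrameOrSeries is the pandas TypeVar used for "same kind in, same kind out" signatures: a Series passed to asfreq is now known to come back as a Series, a DataFrame as a DataFrame. A minimal sketch of the pattern, declaring a local TypeVar for illustration rather than importing pandas internals:

from __future__ import annotations

from typing import TypeVar

import pandas as pd

NDFrameT = TypeVar("NDFrameT", pd.Series, pd.DataFrame)


def round_trip(obj: NDFrameT) -> NDFrameT:
    # the checker infers Series -> Series and DataFrame -> DataFrame
    return obj.copy()


s_out = round_trip(pd.Series([1, 2, 3]))        # inferred as Series
df_out = round_trip(pd.DataFrame({"a": [1]}))   # inferred as DataFrame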
pandas/core/sorting.py (+10, -4)
@@ -232,7 +232,7 @@ def decons_group_index(comp_labels, shape):
     return label_list[::-1]


-def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull: bool):
+def decons_obs_group_ids(comp_ids: np.ndarray, obs_ids, shape, labels, xnull: bool):
     """
     Reconstruct labels from observed group ids.

@@ -360,6 +360,10 @@ def nargsort(
     key : Optional[Callable], default None
     mask : Optional[np.ndarray], default None
        Passed when called by ExtensionArray.argsort.
+
+    Returns
+    -------
+    np.ndarray[np.intp]
     """

     if key is not None:
@@ -404,7 +408,7 @@ def nargsort(
         indexer = np.concatenate([nan_idx, indexer])
     else:
         raise ValueError(f"invalid na_position: {na_position}")
-    return indexer
+    return ensure_platform_int(indexer)


 def nargminmax(values, method: str, axis: int = 0):
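Returning ensure_platform_int(indexer) makes the documented np.ndarray[np.intp] return type true on every platform; positional indexers in pandas and NumPy are expected to be platform ints. A small sketch of why the dtype matters, with invented array contents:

import numpy as np

values = np.array([30.0, 10.0, 20.0])
indexer = np.argsort(values)

# argsort already yields platform ints; take() expects an integer indexer like this
print(indexer.dtype)          # intp (int32 on 32-bit builds, int64 on 64-bit)
print(np.take(values, indexer))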
@@ -644,7 +648,9 @@ def get_group_index_sorter(
     return ensure_platform_int(sorter)


-def compress_group_index(group_index, sort: bool = True):
+def compress_group_index(
+    group_index: np.ndarray, sort: bool = True
+) -> tuple[np.ndarray, np.ndarray]:
     """
     Group_index is offsets into cartesian product of all possible labels. This
     space can be huge, so this function compresses it, by computing offsets
@@ -682,7 +688,7 @@ def _reorder_by_uniques(
     sorter = uniques.argsort()

     # reverse_indexer is where elements came from
-    reverse_indexer = np.empty(len(sorter), dtype=np.int64)
+    reverse_indexer = np.empty(len(sorter), dtype=np.intp)
     reverse_indexer.put(sorter, np.arange(len(sorter)))

     mask = labels < 0

pandas/core/window/rolling.py (+5, -7)
@@ -43,10 +43,7 @@
 )
 from pandas.core.dtypes.generic import (
     ABCDataFrame,
-    ABCDatetimeIndex,
-    ABCPeriodIndex,
     ABCSeries,
-    ABCTimedeltaIndex,
 )
 from pandas.core.dtypes.missing import notna

@@ -58,8 +55,11 @@
 )
 import pandas.core.common as com
 from pandas.core.indexes.api import (
+    DatetimeIndex,
     Index,
     MultiIndex,
+    PeriodIndex,
+    TimedeltaIndex,
 )
 from pandas.core.internals import ArrayManager
 from pandas.core.reshape.concat import concat
@@ -1461,9 +1461,7 @@ def validate(self):
         # we allow rolling on a datetimelike index
         if (
             self.obj.empty
-            or isinstance(
-                self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
-            )
+            or isinstance(self._on, (DatetimeIndex, TimedeltaIndex, PeriodIndex))
         ) and isinstance(self.window, (str, BaseOffset, timedelta)):

             self._validate_monotonic()
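The isinstance check now uses the concrete DatetimeIndex, TimedeltaIndex, and PeriodIndex classes from pandas.core.indexes.api rather than their ABC stand-ins, which is also friendlier to type checkers. The check itself behaves as in this sketch, with an invented index:

import pandas as pd

on = pd.date_range("2021-01-01", periods=5, freq="D")

# a concrete-class check is equivalent here to the old ABC-based one
print(isinstance(on, (pd.DatetimeIndex, pd.TimedeltaIndex, pd.PeriodIndex)))  # True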
@@ -1476,7 +1474,7 @@ def validate(self):
                     f"passed window {self.window} is not "
                     "compatible with a datetimelike index"
                 ) from err
-            if isinstance(self._on, ABCPeriodIndex):
+            if isinstance(self._on, PeriodIndex):
                 self._win_freq_i8 = freq.nanos / (self._on.freq.nanos / self._on.freq.n)
             else:
                 self._win_freq_i8 = freq.nanos
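For a PeriodIndex, _win_freq_i8 expresses the window in units of the index's own period step rather than in raw nanoseconds. A hedged, standalone rerun of the same arithmetic, with invented values; to_offset and the .nanos/.n attributes are existing pandas offset utilities:

import pandas as pd
from pandas.tseries.frequencies import to_offset

pi = pd.period_range("2021-01-01", periods=5, freq="D")
window = to_offset("2D")

# window nanos divided by the nanos of one index period:
# a "2D" window over a daily PeriodIndex spans 2 periods
win_freq_i8 = window.nanos / (pi.freq.nanos / pi.freq.n)
print(win_freq_i8)  # 2.0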
