
Commit 91e6401

Merge branch 'main' into pyarrow-timestamp-support-for-map

2 parents: 79c1fe2 + 24d31b7

107 files changed: +781 additions, -712 deletions


.github/workflows/wheels.yml

Lines changed: 1 addition & 1 deletion

@@ -153,7 +153,7 @@ jobs:
         run: echo "sdist_name=$(cd ./dist && ls -d */)" >> "$GITHUB_ENV"

       - name: Build wheels
-        uses: pypa/[email protected].1
+        uses: pypa/[email protected].2
         with:
           package-dir: ./dist/${{ startsWith(matrix.buildplat[1], 'macosx') && env.sdist_name || needs.build_sdist.outputs.sdist_file }}
         env:

.pre-commit-config.yaml

Lines changed: 6 additions & 6 deletions

@@ -1,4 +1,4 @@
-minimum_pre_commit_version: 2.15.0
+minimum_pre_commit_version: 4.0.0
 exclude: ^LICENSES/|\.(html|csv|svg)$
 # reserve "manual" for relatively slow hooks which we still want to run in CI
 default_stages: [
@@ -19,13 +19,13 @@ ci:
     skip: [pyright, mypy]
 repos:
 -   repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.9.9
+    rev: v0.11.4
     hooks:
     -   id: ruff
         args: [--exit-non-zero-on-fix]
         exclude: ^pandas/tests/frame/test_query_eval.py
     -   id: ruff
-        # TODO: remove autofixe-only rules when they are checked by ruff
+        # TODO: remove autofix only rules when they are checked by ruff
         name: ruff-selected-autofixes
         alias: ruff-selected-autofixes
         files: ^pandas
@@ -34,7 +34,7 @@ repos:
     -   id: ruff-format
         exclude: ^scripts|^pandas/tests/frame/test_query_eval.py
 -   repo: https://github.com/jendrikseipp/vulture
-    rev: 'v2.14'
+    rev: v2.14
     hooks:
     -   id: vulture
         entry: python scripts/run_vulture.py
@@ -95,14 +95,14 @@ repos:
     -   id: sphinx-lint
         args: ["--enable", "all", "--disable", "line-too-long"]
 -   repo: https://github.com/pre-commit/mirrors-clang-format
-    rev: v19.1.7
+    rev: v20.1.0
     hooks:
     -   id: clang-format
         files: ^pandas/_libs/src|^pandas/_libs/include
         args: [-i]
         types_or: [c, c++]
 -   repo: https://github.com/trim21/pre-commit-mirror-meson
-    rev: v1.7.0
+    rev: v1.7.2
     hooks:
     -   id: meson-fmt
         args: ['--inplace']

asv_bench/benchmarks/frame_methods.py

Lines changed: 1 addition & 1 deletion

@@ -517,7 +517,7 @@ def setup(self):
         self.df = DataFrame(np.random.randn(1000, 100))

         self.s = Series(np.arange(1028.0))
-        self.df2 = DataFrame({i: self.s for i in range(1028)})
+        self.df2 = DataFrame(dict.fromkeys(range(1028), self.s))
         self.df3 = DataFrame(np.random.randn(1000, 3), columns=list("ABC"))

     def time_apply_user_func(self):
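A quick gloss on the benchmark tweak above (illustration only, not part of the commit): ``dict.fromkeys(range(n), value)`` builds the same mapping as the dict comprehension, with every key pointing at the same object, and is typically a little faster because the loop runs in C. A minimal sketch with a small ``n``:

    import numpy as np
    import pandas as pd

    s = pd.Series(np.arange(4.0))

    # Both spellings build {0: s, 1: s, 2: s}; every key points at the same Series.
    via_comprehension = {i: s for i in range(3)}
    via_fromkeys = dict.fromkeys(range(3), s)

    assert list(via_comprehension) == list(via_fromkeys) == [0, 1, 2]
    assert all(v is s for v in via_fromkeys.values())

    df = pd.DataFrame(via_fromkeys)
    print(df.shape)  # (4, 3)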

asv_bench/benchmarks/indexing_engines.py

Lines changed: 16 additions & 0 deletions

@@ -67,6 +67,14 @@ class NumericEngineIndexing:
     def setup(self, engine_and_dtype, index_type, unique, N):
         engine, dtype = engine_and_dtype

+        if (
+            index_type == "non_monotonic"
+            and dtype in [np.int16, np.int8, np.uint8]
+            and unique
+        ):
+            # Values overflow
+            raise NotImplementedError
+
         if index_type == "monotonic_incr":
             if unique:
                 arr = np.arange(N * 3, dtype=dtype)
@@ -115,6 +123,14 @@ def setup(self, engine_and_dtype, index_type, unique, N):
         engine, dtype = engine_and_dtype
         dtype = dtype.lower()

+        if (
+            index_type == "non_monotonic"
+            and dtype in ["int16", "int8", "uint8"]
+            and unique
+        ):
+            # Values overflow
+            raise NotImplementedError
+
         if index_type == "monotonic_incr":
             if unique:
                 arr = np.arange(N * 3, dtype=dtype)
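Why the new guard skips these cases (illustration only; the ``N`` below is an assumed size, not taken from the benchmark suite): ``np.arange(N * 3, dtype=...)`` cannot produce ``3 * N`` distinct values in an 8- or 16-bit integer, so the values wrap around and the ``unique`` parametrization no longer describes the data.

    import numpy as np

    N = 2_000_000  # illustrative size only; the real benchmark parametrizes N itself
    for dtype in (np.int8, np.int16, np.uint8):
        info = np.iinfo(dtype)
        capacity = int(info.max) - int(info.min) + 1
        print(f"{np.dtype(dtype)}: {capacity} representable values, "
              f"but {3 * N} unique values are needed")

    # Small-scale demonstration of the wraparound itself:
    wrapped = np.arange(300).astype(np.uint8)  # 300 > 255, so values repeat
    assert len(np.unique(wrapped)) < len(wrapped)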

ci/code_checks.sh

Lines changed: 0 additions & 3 deletions

@@ -72,9 +72,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.Series.dt PR01" `# Accessors are implemented as classes, but we do not document the Parameters section` \
         -i "pandas.Period.freq GL08" \
         -i "pandas.Period.ordinal GL08" \
-        -i "pandas.Timestamp.max PR02" \
-        -i "pandas.Timestamp.min PR02" \
-        -i "pandas.Timestamp.resolution PR02" \
         -i "pandas.core.groupby.DataFrameGroupBy.plot PR02" \
         -i "pandas.core.groupby.SeriesGroupBy.plot PR02" \
         -i "pandas.core.resample.Resampler.quantile PR01,PR07" \

doc/source/development/debugging_extensions.rst

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ By default building pandas from source will generate a release build. To generat

 .. note::

-   conda environments update CFLAGS/CPPFLAGS with flags that are geared towards generating releases. If using conda, you may need to set ``CFLAGS="$CFLAGS -O0"`` and ``CPPFLAGS="$CPPFLAGS -O0"`` to ensure optimizations are turned off for debugging
+   conda environments update CFLAGS/CPPFLAGS with flags that are geared towards generating releases, and may work counter towards usage in a development environment. If using conda, you should unset these environment variables via ``export CFLAGS=`` and ``export CPPFLAGS=``

 By specifying ``builddir="debug"`` all of the targets will be built and placed in the debug directory relative to the project root. This helps to keep your debug and release artifacts separate; you are of course able to choose a different directory name or omit altogether if you do not care to separate build types.

doc/source/getting_started/comparison/comparison_with_r.rst

Lines changed: 2 additions & 2 deletions

@@ -383,7 +383,7 @@ In Python, since ``a`` is a list, you can simply use list comprehension.

 .. ipython:: python

-   a = np.array(list(range(1, 24)) + [np.NAN]).reshape(2, 3, 4)
+   a = np.array(list(range(1, 24)) + [np.nan]).reshape(2, 3, 4)
    pd.DataFrame([tuple(list(x) + [val]) for x, val in np.ndenumerate(a)])

 meltlist
@@ -402,7 +402,7 @@ In Python, this list would be a list of tuples, so

 .. ipython:: python

-   a = list(enumerate(list(range(1, 5)) + [np.NAN]))
+   a = list(enumerate(list(range(1, 5)) + [np.nan]))
    pd.DataFrame(a)

 For more details and examples see :ref:`the Intro to Data Structures
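A side note on why these two lines changed (my gloss, not part of the diff): NumPy 2.0 removed the ``np.NAN``/``np.NaN`` aliases, so ``np.nan`` is the only spelling that keeps working. A minimal sketch of the updated second example:

    import numpy as np
    import pandas as pd

    # np.nan is the lower-case, NumPy-2-compatible spelling of a missing float.
    a = list(enumerate(list(range(1, 5)) + [np.nan]))
    print(pd.DataFrame(a))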

doc/source/user_guide/basics.rst

Lines changed: 2 additions & 2 deletions

@@ -2064,12 +2064,12 @@ different numeric dtypes will **NOT** be combined. The following example will gi

 .. ipython:: python

-   df1 = pd.DataFrame(np.random.randn(8, 1), columns=["A"], dtype="float32")
+   df1 = pd.DataFrame(np.random.randn(8, 1), columns=["A"], dtype="float64")
    df1
    df1.dtypes
    df2 = pd.DataFrame(
        {
-           "A": pd.Series(np.random.randn(8), dtype="float16"),
+           "A": pd.Series(np.random.randn(8), dtype="float32"),
            "B": pd.Series(np.random.randn(8)),
            "C": pd.Series(np.random.randint(0, 255, size=8), dtype="uint8"),  # [0,255] (range of uint8)
        }

doc/source/user_guide/enhancingperf.rst

Lines changed: 2 additions & 0 deletions

@@ -171,6 +171,7 @@ can be improved by passing an ``np.ndarray``.
    In [4]: %%cython
       ...: cimport numpy as np
       ...: import numpy as np
+      ...: np.import_array()
       ...: cdef double f_typed(double x) except? -2:
       ...:     return x * (x - 1)
       ...: cpdef double integrate_f_typed(double a, double b, int N):
@@ -225,6 +226,7 @@ and ``wraparound`` checks can yield more performance.
       ...: cimport cython
       ...: cimport numpy as np
       ...: import numpy as np
+      ...: np.import_array()
       ...: cdef np.float64_t f_typed(np.float64_t x) except? -2:
       ...:     return x * (x - 1)
       ...: cpdef np.float64_t integrate_f_typed(np.float64_t a, np.float64_t b, np.int64_t N):

doc/source/whatsnew/v0.11.0.rst

Lines changed: 2 additions & 2 deletions

@@ -74,10 +74,10 @@ Numeric dtypes will propagate and can coexist in DataFrames. If a dtype is passe

 .. ipython:: python

-   df1 = pd.DataFrame(np.random.randn(8, 1), columns=['A'], dtype='float32')
+   df1 = pd.DataFrame(np.random.randn(8, 1), columns=['A'], dtype='float64')
    df1
    df1.dtypes
-   df2 = pd.DataFrame({'A': pd.Series(np.random.randn(8), dtype='float16'),
+   df2 = pd.DataFrame({'A': pd.Series(np.random.randn(8), dtype='float32'),
                        'B': pd.Series(np.random.randn(8)),
                        'C': pd.Series(range(8), dtype='uint8')})
    df2

doc/source/whatsnew/v3.0.0.rst

Lines changed: 9 additions & 0 deletions

@@ -68,6 +68,7 @@ Other enhancements
 - :func:`read_parquet` accepts ``to_pandas_kwargs`` which are forwarded to :meth:`pyarrow.Table.to_pandas` which enables passing additional keywords to customize the conversion to pandas, such as ``maps_as_pydicts`` to read the Parquet map data type as python dictionaries (:issue:`56842`)
 - :meth:`.DataFrameGroupBy.transform`, :meth:`.SeriesGroupBy.transform`, :meth:`.DataFrameGroupBy.agg`, :meth:`.SeriesGroupBy.agg`, :meth:`.SeriesGroupBy.apply`, :meth:`.DataFrameGroupBy.apply` now support ``kurt`` (:issue:`40139`)
 - :meth:`DataFrame.apply` supports using third-party execution engines like the Bodo.ai JIT compiler (:issue:`60668`)
+- :meth:`DataFrame.iloc` and :meth:`Series.iloc` now support boolean masks in ``__getitem__`` for more consistent indexing behavior (:issue:`60994`)
 - :meth:`DataFrameGroupBy.transform`, :meth:`SeriesGroupBy.transform`, :meth:`DataFrameGroupBy.agg`, :meth:`SeriesGroupBy.agg`, :meth:`RollingGroupby.apply`, :meth:`ExpandingGroupby.apply`, :meth:`Rolling.apply`, :meth:`Expanding.apply`, :meth:`DataFrame.apply` with ``engine="numba"`` now supports positional arguments passed as kwargs (:issue:`58995`)
 - :meth:`Rolling.agg`, :meth:`Expanding.agg` and :meth:`ExponentialMovingWindow.agg` now accept :class:`NamedAgg` aggregations through ``**kwargs`` (:issue:`28333`)
 - :meth:`Series.map` can now accept kwargs to pass on to func (:issue:`59814`)
@@ -420,6 +421,7 @@ Other Deprecations
 - Deprecated lowercase strings ``w``, ``w-mon``, ``w-tue``, etc. denoting frequencies in :class:`Week` in favour of ``W``, ``W-MON``, ``W-TUE``, etc. (:issue:`58998`)
 - Deprecated parameter ``method`` in :meth:`DataFrame.reindex_like` / :meth:`Series.reindex_like` (:issue:`58667`)
 - Deprecated strings ``w``, ``d``, ``MIN``, ``MS``, ``US`` and ``NS`` denoting units in :class:`Timedelta` in favour of ``W``, ``D``, ``min``, ``ms``, ``us`` and ``ns`` (:issue:`59051`)
+- Deprecated the ``arg`` parameter of ``Series.map``; pass the added ``func`` argument instead. (:issue:`61260`)
 - Deprecated using ``epoch`` date format in :meth:`DataFrame.to_json` and :meth:`Series.to_json`, use ``iso`` instead. (:issue:`57063`)

 .. ---------------------------------------------------------------------------
@@ -621,6 +623,7 @@ Performance improvements
 - Performance improvement in :meth:`CategoricalDtype.update_dtype` when ``dtype`` is a :class:`CategoricalDtype` with non ``None`` categories and ordered (:issue:`59647`)
 - Performance improvement in :meth:`DataFrame.__getitem__` when ``key`` is a :class:`DataFrame` with many columns (:issue:`61010`)
 - Performance improvement in :meth:`DataFrame.astype` when converting to extension floating dtypes, e.g. "Float64" (:issue:`60066`)
+- Performance improvement in :meth:`DataFrame.stack` when using ``future_stack=True`` and the DataFrame does not have a :class:`MultiIndex` (:issue:`58391`)
 - Performance improvement in :meth:`DataFrame.where` when ``cond`` is a :class:`DataFrame` with many columns (:issue:`61010`)
 - Performance improvement in :meth:`to_hdf` avoid unnecessary reopenings of the HDF5 file to speedup data addition to files with a very large number of groups . (:issue:`58248`)
 - Performance improvement in ``DataFrameGroupBy.__len__`` and ``SeriesGroupBy.__len__`` (:issue:`57595`)
@@ -636,6 +639,7 @@ Bug fixes
 Categorical
 ^^^^^^^^^^^
 - Bug in :func:`Series.apply` where ``nan`` was ignored for :class:`CategoricalDtype` (:issue:`59938`)
+- Bug in :meth:`DataFrame.pivot` and :meth:`DataFrame.set_index` raising an ``ArrowNotImplementedError`` for columns with pyarrow dictionary dtype (:issue:`53051`)
 - Bug in :meth:`Series.convert_dtypes` with ``dtype_backend="pyarrow"`` where empty :class:`CategoricalDtype` :class:`Series` raised an error or got converted to ``null[pyarrow]`` (:issue:`59934`)
 -

@@ -648,6 +652,7 @@ Datetimelike
 - Bug in :func:`date_range` where using a negative frequency value would not include all points between the start and end values (:issue:`56147`)
 - Bug in :func:`tseries.api.guess_datetime_format` would fail to infer time format when "%Y" == "%H%M" (:issue:`57452`)
 - Bug in :func:`tseries.frequencies.to_offset` would fail to parse frequency strings starting with "LWOM" (:issue:`59218`)
+- Bug in :meth:`DataFrame.fillna` raising an ``AssertionError`` instead of ``OutOfBoundsDatetime`` when filling a ``datetime64[ns]`` column with an out-of-bounds timestamp. Now correctly raises ``OutOfBoundsDatetime``. (:issue:`61208`)
 - Bug in :meth:`DataFrame.min` and :meth:`DataFrame.max` casting ``datetime64`` and ``timedelta64`` columns to ``float64`` and losing precision (:issue:`60850`)
 - Bug in :meth:`Dataframe.agg` with df with missing values resulting in IndexError (:issue:`58810`)
 - Bug in :meth:`DatetimeIndex.is_year_start` and :meth:`DatetimeIndex.is_quarter_start` does not raise on Custom business days frequencies bigger then "1C" (:issue:`58664`)
@@ -762,6 +767,7 @@ Plotting
 - Bug in :meth:`DataFrame.plot.bar` with ``stacked=True`` where labels on stacked bars with zero-height segments were incorrectly positioned at the base instead of the label position of the previous segment (:issue:`59429`)
 - Bug in :meth:`DataFrame.plot.line` raising ``ValueError`` when set both color and a ``dict`` style (:issue:`59461`)
 - Bug in :meth:`DataFrame.plot` that causes a shift to the right when the frequency multiplier is greater than one. (:issue:`57587`)
+- Bug in :meth:`Series.plot` preventing a line and scatter plot from being aligned (:issue:`61005`)
 - Bug in :meth:`Series.plot` with ``kind="pie"`` with :class:`ArrowDtype` (:issue:`59192`)

 Groupby/resample/rolling
@@ -773,6 +779,7 @@ Groupby/resample/rolling
 - Bug in :meth:`.DataFrameGroupBy.quantile` when ``interpolation="nearest"`` is inconsistent with :meth:`DataFrame.quantile` (:issue:`47942`)
 - Bug in :meth:`.Resampler.interpolate` on a :class:`DataFrame` with non-uniform sampling and/or indices not aligning with the resulting resampled index would result in wrong interpolation (:issue:`21351`)
 - Bug in :meth:`DataFrame.ewm` and :meth:`Series.ewm` when passed ``times`` and aggregation functions other than mean (:issue:`51695`)
+- Bug in :meth:`DataFrame.resample` and :meth:`Series.resample` were not keeping the index name when the index had :class:`ArrowDtype` timestamp dtype (:issue:`61222`)
 - Bug in :meth:`DataFrame.resample` changing index type to :class:`MultiIndex` when the dataframe is empty and using an upsample method (:issue:`55572`)
 - Bug in :meth:`DataFrameGroupBy.agg` that raises ``AttributeError`` when there is dictionary input and duplicated columns, instead of returning a DataFrame with the aggregation of all duplicate columns. (:issue:`55041`)
 - Bug in :meth:`DataFrameGroupBy.apply` and :meth:`SeriesGroupBy.apply` for empty data frame with ``group_keys=False`` still creating output index using group keys. (:issue:`60471`)
@@ -825,6 +832,7 @@ Other
 - Bug in :class:`DataFrame` when passing a ``dict`` with a NA scalar and ``columns`` that would always return ``np.nan`` (:issue:`57205`)
 - Bug in :class:`Series` ignoring errors when trying to convert :class:`Series` input data to the given ``dtype`` (:issue:`60728`)
 - Bug in :func:`eval` on :class:`ExtensionArray` on including division ``/`` failed with a ``TypeError``. (:issue:`58748`)
+- Bug in :func:`eval` where method calls on binary operations like ``(x + y).dropna()`` would raise ``AttributeError: 'BinOp' object has no attribute 'value'`` (:issue:`61175`)
 - Bug in :func:`eval` where the names of the :class:`Series` were not preserved when using ``engine="numexpr"``. (:issue:`10239`)
 - Bug in :func:`eval` with ``engine="numexpr"`` returning unexpected result for float division. (:issue:`59736`)
 - Bug in :func:`to_numeric` raising ``TypeError`` when ``arg`` is a :class:`Timedelta` or :class:`Timestamp` scalar. (:issue:`59944`)
@@ -834,6 +842,7 @@ Other
 - Bug in :meth:`DataFrame.eval` and :meth:`DataFrame.query` which did not allow to use ``tan`` function. (:issue:`55091`)
 - Bug in :meth:`DataFrame.query` where using duplicate column names led to a ``TypeError``. (:issue:`59950`)
 - Bug in :meth:`DataFrame.query` which raised an exception or produced incorrect results when expressions contained backtick-quoted column names containing the hash character ``#``, backticks, or characters that fall outside the ASCII range (U+0001..U+007F). (:issue:`59285`) (:issue:`49633`)
+- Bug in :meth:`DataFrame.query` which raised an exception when querying integer column names using backticks. (:issue:`60494`)
 - Bug in :meth:`DataFrame.shift` where passing a ``freq`` on a DataFrame with no columns did not shift the index correctly. (:issue:`60102`)
 - Bug in :meth:`DataFrame.sort_index` when passing ``axis="columns"`` and ``ignore_index=True`` and ``ascending=False`` not returning a :class:`RangeIndex` columns (:issue:`57293`)
 - Bug in :meth:`DataFrame.transform` that was returning the wrong order unless the index was monotonically increasing. (:issue:`57069`)
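The first enhancement entry above (``read_parquet`` forwarding ``to_pandas_kwargs`` to ``pyarrow.Table.to_pandas``) is the feature this branch builds on. A minimal usage sketch, assuming a pandas build that includes the enhancement and a hypothetical ``example.parquet`` file containing a Parquet map column:

    import pandas as pd

    # Forward maps_as_pydicts to pyarrow.Table.to_pandas so Parquet map columns
    # are materialized as Python dicts rather than lists of key/value pairs.
    df = pd.read_parquet(
        "example.parquet",  # hypothetical file, for illustration only
        to_pandas_kwargs={"maps_as_pydicts": "strict"},
    )
    print(df.dtypes)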

environment.yml

Lines changed: 2 additions & 2 deletions

@@ -23,7 +23,7 @@ dependencies:

   # required dependencies
   - python-dateutil
-  - numpy<2
+  - numpy<3

   # optional dependencies
   - beautifulsoup4>=4.11.2
@@ -80,7 +80,7 @@ dependencies:
   - flake8=7.1.0  # run in subprocess over docstring examples
   - mypy=1.13.0  # pre-commit uses locally installed mypy
   - tokenize-rt  # scripts/check_for_inconsistent_pandas_namespace.py
-  - pre-commit>=4.0.1
+  - pre-commit>=4.2.0

   # documentation
   - gitpython  # obtain contributors from git for whatsnew

pandas/__init__.py

Lines changed: 5 additions & 7 deletions

@@ -4,19 +4,17 @@

 # Let users know if they're missing any of our hard dependencies
 _hard_dependencies = ("numpy", "dateutil")
-_missing_dependencies = []

 for _dependency in _hard_dependencies:
     try:
         __import__(_dependency)
     except ImportError as _e:  # pragma: no cover
-        _missing_dependencies.append(f"{_dependency}: {_e}")
+        raise ImportError(
+            f"Unable to import required dependency {_dependency}. "
+            "Please see the traceback for details."
+        ) from _e

-if _missing_dependencies:  # pragma: no cover
-    raise ImportError(
-        "Unable to import required dependencies:\n" + "\n".join(_missing_dependencies)
-    )
-del _hard_dependencies, _dependency, _missing_dependencies
+del _hard_dependencies, _dependency

 try:
     # numpy compat

pandas/_libs/algos.pyx

Lines changed: 5 additions & 4 deletions

@@ -391,10 +391,11 @@ def nancorr(const float64_t[:, :] mat, bint cov=False, minp=None):
                 # clip `covxy / divisor` to ensure coeff is within bounds
                 if divisor != 0:
                     val = covxy / divisor
-                    if val > 1.0:
-                        val = 1.0
-                    elif val < -1.0:
-                        val = -1.0
+                    if not cov:
+                        if val > 1.0:
+                            val = 1.0
+                        elif val < -1.0:
+                            val = -1.0
                     result[xi, yi] = result[yi, xi] = val
                 else:
                     result[xi, yi] = result[yi, xi] = NaN
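The guard added above reflects what ``nancorr`` computes in each mode: Pearson correlations are bounded to [-1, 1] by definition, so clipping only mops up floating-point drift, whereas covariances carry the variables' scale and can legitimately be far outside that range, so clipping them would silently corrupt results. An independent NumPy illustration (not the pandas code path):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.normal(size=1_000)
    y = 100.0 * x + rng.normal(size=1_000)

    corr = np.corrcoef(x, y)[0, 1]   # bounded: always within [-1, 1]
    cov = np.cov(x, y)[0, 1]         # scale-dependent: ~100 here, clipping it would be wrong

    assert -1.0 <= corr <= 1.0
    assert cov > 1.0
    print(f"corr={corr:.3f}, cov={cov:.1f}")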

pandas/_libs/tslibs/timedeltas.pyi

Lines changed: 2 additions & 4 deletions

@@ -3,7 +3,6 @@ from typing import (
     ClassVar,
     Literal,
     TypeAlias,
-    TypeVar,
     overload,
 )

@@ -60,7 +59,6 @@ UnitChoices: TypeAlias = Literal[
     "nanos",
     "nanosecond",
 ]
-_S = TypeVar("_S", bound=timedelta)

 def get_unit_for_round(freq, creso: int) -> int: ...
 def disallow_ambiguous_unit(unit: str | None) -> None: ...
@@ -95,11 +93,11 @@ class Timedelta(timedelta):
     _value: int  # np.int64
     # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]")
     def __new__(  # type: ignore[misc]
-        cls: type[_S],
+        cls: type[Self],
         value=...,
         unit: str | None = ...,
        **kwargs: float | np.integer | np.floating,
-    ) -> _S | NaTType: ...
+    ) -> Self | NaTType: ...
     @classmethod
     def _from_value_and_reso(cls, value: np.int64, reso: int) -> Timedelta: ...
     @property
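The stub change replaces a hand-rolled ``TypeVar`` bound to ``timedelta`` with :pep:`673` ``Self``, which gives subclasses the correct return type without any extra declarations. A minimal sketch of the same pattern outside pandas; the class and method names are invented for illustration:

    from typing import Self  # Python 3.11+; earlier versions can use typing_extensions


    class Interval:
        def __init__(self, ns: int) -> None:
            self.ns = ns

        # With Self, subclasses automatically get the right return type
        # without declaring a TypeVar("_S", bound="Interval") by hand.
        def doubled(self) -> Self:
            return type(self)(self.ns * 2)


    class PreciseInterval(Interval):
        pass


    val = PreciseInterval(10).doubled()  # statically typed as PreciseInterval
    print(type(val).__name__, val.ns)    # PreciseInterval 20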
