
Commit aac3e59

Merge branch 'pandas-dev:main' into patch-1
2 parents 9805875 + 7ee1091 commit aac3e59

File tree: 304 files changed, +2516 -787 lines


.gitattributes

+5-1
@@ -68,7 +68,7 @@ ci export-ignore
 doc export-ignore
 gitpod export-ignore
 MANIFEST.in export-ignore
-scripts export-ignore
+scripts/** export-ignore
 typings export-ignore
 web export-ignore
 CITATION.cff export-ignore
@@ -82,3 +82,7 @@ setup.py export-ignore
 # csv_dir_path fixture checks the existence of the directory
 # exclude the whole directory to avoid running related tests in sdist
 pandas/tests/io/parser/data export-ignore
+
+# Include cibw script in sdist since it's needed for building wheels
+scripts/cibw_before_build.sh -export-ignore
+scripts/cibw_before_test.sh -export-ignore

.github/workflows/unit-tests.yml

+4
@@ -57,6 +57,9 @@ jobs:
             # Also install zh_CN (its encoding is gb2312) but do not activate it.
             # It will be temporarily activated during tests with locale.setlocale
             extra_loc: "zh_CN"
+          - name: "Future infer strings"
+            env_file: actions-311.yaml
+            pandas_future_infer_string: "1"
           - name: "Pypy"
             env_file: actions-pypy-39.yaml
             pattern: "not slow and not network and not single_cpu"
@@ -75,6 +78,7 @@ jobs:
       LANG: ${{ matrix.lang || 'C.UTF-8' }}
       LC_ALL: ${{ matrix.lc_all || '' }}
       PANDAS_CI: '1'
+      PANDAS_FUTURE_INFER_STRING: ${{ matrix.pandas_future_infer_string || '0' }}
       TEST_ARGS: ${{ matrix.test_args || '' }}
       PYTEST_WORKERS: 'auto'
       PYTEST_TARGET: ${{ matrix.pytest_target || 'pandas' }}
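
The new matrix entry exercises the future string-inference behaviour by exporting PANDAS_FUTURE_INFER_STRING=1 into the job environment. A minimal local sketch of what that flag is assumed to do (set the default of the ``future.infer_string`` option when pandas is imported); the exact mechanism lives in pandas' config initialisation and may differ:

    import os

    # Assumption: the variable must be set before pandas is imported for the
    # default to change, mirroring how the CI job exports it for the whole run.
    os.environ["PANDAS_FUTURE_INFER_STRING"] = "1"

    import pandas as pd

    print(pd.get_option("future.infer_string"))  # expected: True in this job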

.github/workflows/wheels.yml

+21-7
@@ -99,14 +99,25 @@ jobs:
        - [macos-14, macosx_arm64]
        - [windows-2022, win_amd64]
        # TODO: support PyPy?
-        python: [["cp310", "3.10"], ["cp311", "3.11"], ["cp312", "3.12"]]
-
+        python: [["cp310", "3.10"], ["cp311", "3.11"], ["cp312", "3.12"], ["cp313", "3.13"], ["cp313t", "3.13"]]
+        include:
+          # TODO: Remove this plus installing build deps in cibw_before_build.sh
+          # and test deps in cibw_before_test.sh after pandas can be built with a released NumPy/Cython
+          - python: ["cp313", "3.13"]
+            cibw_build_frontend: 'pip; args: --no-build-isolation'
+          - python: ["cp313t", "3.13"]
+            cibw_build_frontend: 'pip; args: --no-build-isolation'
        # Build Pyodide wheels and upload them to Anaconda.org
        # NOTE: this job is similar to the one in unit-tests.yml except for the fact
        # that it uses cibuildwheel instead of a standard Pyodide xbuildenv setup.
-        include:
-          - buildplat: [ubuntu-22.04, pyodide_wasm32]
-            python: ["cp312", "3.12"]
+          - buildplat: [ubuntu-22.04, pyodide_wasm32]
+            python: ["cp312", "3.12"]
+            cibw_build_frontend: 'build'
+        # TODO: Build free-threaded wheels for Windows
+        exclude:
+          - buildplat: [windows-2022, win_amd64]
+            python: ["cp313t", "3.13"]
+
     env:
       IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }}
       IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
@@ -153,6 +164,7 @@ jobs:
        env:
          CIBW_PRERELEASE_PYTHONS: True
          CIBW_BUILD: ${{ matrix.python[0] }}-${{ matrix.buildplat[1] }}
+          CIBW_BUILD_FRONTEND: ${{ matrix.cibw_build_frontend || 'pip' }}
          CIBW_PLATFORM: ${{ matrix.buildplat[1] == 'pyodide_wasm32' && 'pyodide' || 'auto' }}

      - name: Set up Python
@@ -176,15 +188,17 @@ jobs:
      - name: Test Windows Wheels
        if: ${{ matrix.buildplat[1] == 'win_amd64' }}
        shell: pwsh
+        # TODO: Remove NumPy nightly install when there's a 3.13 wheel on PyPI
        run: |
          $TST_CMD = @"
          python -m pip install hypothesis>=6.84.0 pytest>=7.3.2 pytest-xdist>=3.4.0;
+          ${{ matrix.python[1] == '3.13' && 'python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy;' }}
          python -m pip install `$(Get-Item pandas\wheelhouse\*.whl);
          python -c `'import pandas as pd; pd.test(extra_args=[`\"--no-strict-data-files`\", `\"-m not clipboard and not single_cpu and not slow and not network and not db`\"])`';
          "@
          # add rc to the end of the image name if the Python version is unreleased
-          docker pull python:${{ matrix.python[1] == '3.12' && '3.12-rc' || format('{0}-windowsservercore', matrix.python[1]) }}
-          docker run --env PANDAS_CI='1' -v ${PWD}:C:\pandas python:${{ matrix.python[1] == '3.12' && '3.12-rc' || format('{0}-windowsservercore', matrix.python[1]) }} powershell -Command $TST_CMD
+          docker pull python:${{ matrix.python[1] == '3.13' && '3.13-rc' || format('{0}-windowsservercore', matrix.python[1]) }}
+          docker run --env PANDAS_CI='1' -v ${PWD}:C:\pandas python:${{ matrix.python[1] == '3.13' && '3.13-rc' || format('{0}-windowsservercore', matrix.python[1]) }} powershell -Command $TST_CMD

      - uses: actions/upload-artifact@v4
        with:

MANIFEST.in

+4
@@ -62,3 +62,7 @@ prune pandas/tests/io/parser/data
 # Selectively re-add *.cxx files that were excluded above
 graft pandas/_libs/src
 graft pandas/_libs/include
+
+# Include cibw script in sdist since it's needed for building wheels
+include scripts/cibw_before_build.sh
+include scripts/cibw_before_test.sh

README.md

+1-1
@@ -96,7 +96,7 @@ The source code is currently hosted on GitHub at:
 https://github.com/pandas-dev/pandas
 
 Binary installers for the latest released version are available at the [Python
-Package Index (PyPI)](https://pypi.org/project/pandas) and on [Conda](https://docs.conda.io/en/latest/).
+Package Index (PyPI)](https://pypi.org/project/pandas) and on [Conda](https://anaconda.org/conda-forge/pandas).
 
 ```sh
 # conda

ci/code_checks.sh

-12
@@ -70,16 +70,10 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         --format=actions \
         -i ES01 `# For now it is ok if docstrings are missing the extended summary` \
         -i "pandas.Series.dt PR01" `# Accessors are implemented as classes, but we do not document the Parameters section` \
-        -i "pandas.MultiIndex.append PR07,SA01" \
-        -i "pandas.MultiIndex.copy PR07,RT03,SA01" \
         -i "pandas.MultiIndex.get_level_values SA01" \
-        -i "pandas.MultiIndex.get_loc PR07" \
         -i "pandas.MultiIndex.get_loc_level PR07" \
-        -i "pandas.MultiIndex.levshape SA01" \
         -i "pandas.MultiIndex.names SA01" \
-        -i "pandas.MultiIndex.remove_unused_levels RT03,SA01" \
         -i "pandas.MultiIndex.reorder_levels RT03,SA01" \
-        -i "pandas.MultiIndex.set_levels RT03,SA01" \
         -i "pandas.MultiIndex.sortlevel PR07,SA01" \
         -i "pandas.MultiIndex.to_frame RT03" \
         -i "pandas.NA SA01" \
@@ -223,7 +217,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.Timestamp.fromordinal SA01" \
         -i "pandas.Timestamp.fromtimestamp PR01,SA01" \
         -i "pandas.Timestamp.hour GL08" \
-        -i "pandas.Timestamp.isoweekday SA01" \
         -i "pandas.Timestamp.max PR02" \
         -i "pandas.Timestamp.microsecond GL08" \
         -i "pandas.Timestamp.min PR02" \
@@ -304,10 +297,8 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.api.types.is_re PR07,SA01" \
         -i "pandas.api.types.is_re_compilable PR07,SA01" \
         -i "pandas.api.types.is_sparse SA01" \
-        -i "pandas.api.types.is_string_dtype SA01" \
         -i "pandas.api.types.is_timedelta64_ns_dtype SA01" \
         -i "pandas.api.types.pandas_dtype PR07,RT03,SA01" \
-        -i "pandas.api.types.union_categoricals RT03,SA01" \
         -i "pandas.arrays.ArrowExtensionArray PR07,SA01" \
         -i "pandas.arrays.BooleanArray SA01" \
         -i "pandas.arrays.DatetimeArray SA01" \
@@ -330,7 +321,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.core.groupby.DataFrameGroupBy.hist RT03" \
         -i "pandas.core.groupby.DataFrameGroupBy.indices SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.max SA01" \
-        -i "pandas.core.groupby.DataFrameGroupBy.median SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.min SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.nth PR02" \
         -i "pandas.core.groupby.DataFrameGroupBy.nunique SA01" \
@@ -349,7 +339,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.max SA01" \
-        -i "pandas.core.groupby.SeriesGroupBy.median SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.min SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.nth PR02" \
         -i "pandas.core.groupby.SeriesGroupBy.ohlc SA01" \
@@ -364,7 +353,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.core.resample.Resampler.indices SA01" \
         -i "pandas.core.resample.Resampler.max PR01,RT03,SA01" \
         -i "pandas.core.resample.Resampler.mean SA01" \
-        -i "pandas.core.resample.Resampler.median SA01" \
         -i "pandas.core.resample.Resampler.min PR01,RT03,SA01" \
         -i "pandas.core.resample.Resampler.ohlc SA01" \
         -i "pandas.core.resample.Resampler.prod SA01" \

doc/source/getting_started/comparison/comparison_with_sql.rst

+1-1
@@ -505,7 +505,7 @@ DELETE
    DELETE FROM tips
    WHERE tip > 9;
 
-In pandas we select the rows that should remain instead of deleting them:
+In pandas we select the rows that should remain instead of deleting the rows that should be removed:
 
 .. ipython:: python
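
To illustrate the reworded sentence, a small stand-in for the guide's ``tips`` DataFrame (hypothetical values) and the selection that mirrors the SQL ``DELETE ... WHERE tip > 9``:

    import pandas as pd

    tips = pd.DataFrame({"total_bill": [16.99, 48.33], "tip": [1.01, 12.00]})

    # Keep the rows that should remain rather than deleting the offending ones.
    tips = tips.loc[tips["tip"] <= 9]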

doc/source/user_guide/pyarrow.rst

+3-1
@@ -159,9 +159,11 @@ PyArrow also provides IO reading functionality that has been integrated into sev
 functions provide an ``engine`` keyword that can dispatch to PyArrow to accelerate reading from an IO source.
 
 * :func:`read_csv`
+* :func:`read_feather`
 * :func:`read_json`
 * :func:`read_orc`
-* :func:`read_feather`
+* :func:`read_parquet`
+* :func:`read_table` (experimental)
 
 .. ipython:: python
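
A quick sketch of the ``engine`` keyword these readers expose ("data.csv" is a placeholder path, and the optional pyarrow dependency must be installed):

    import pandas as pd

    # Dispatch CSV parsing to the PyArrow engine instead of the default C parser.
    df = pd.read_csv("data.csv", engine="pyarrow")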

doc/source/user_guide/style.ipynb

+1-1
@@ -351,7 +351,7 @@
     "\n",
     "- Using [.set_table_styles()][table] to control broader areas of the table with specified internal CSS. Although table styles allow the flexibility to add CSS selectors and properties controlling all individual parts of the table, they are unwieldy for individual cell specifications. Also, note that table styles cannot be exported to Excel. \n",
     "- Using [.set_td_classes()][td_class] to directly link either external CSS classes to your data cells or link the internal CSS classes created by [.set_table_styles()][table]. See [here](#Setting-Classes-and-Linking-to-External-CSS). These cannot be used on column header rows or indexes, and also won't export to Excel. \n",
-    "- Using the [.apply()][apply] and [.map()][map] functions to add direct internal CSS to specific data cells. See [here](#Styler-Functions). As of v1.4.0 there are also methods that work directly on column header rows or indexes; [.apply_index()][applyindex] and [.map_index()][mapindex]. Note that only these methods add styles that will export to Excel. These methods work in a similar way to [DataFrame.apply()][dfapply] and [DataFrame.map()][dfmap].\n",
+    "- Using the [.apply()][apply] and [.map()][map] functions to add direct internal CSS to specific data cells. See [here](#Styler-Functions). As of v1.4.0 there are also methods that work directly on column header rows or indexes: [.apply_index()][applyindex] and [.map_index()][mapindex]. Note that only these methods add styles that will export to Excel. These methods work in a similar way to [DataFrame.apply()][dfapply] and [DataFrame.map()][dfmap].\n",
     "\n",
     "[table]: ../reference/api/pandas.io.formats.style.Styler.set_table_styles.rst\n",
     "[styler]: ../reference/api/pandas.io.formats.style.Styler.rst\n",

doc/source/user_guide/text.rst

+1-1
@@ -204,7 +204,7 @@ and replacing any remaining whitespaces with underscores:
 
 .. warning::
 
-    The type of the Series is inferred and the allowed types (i.e. strings).
+    The type of the Series is inferred and is one among the allowed types (i.e. strings).
 
 Generally speaking, the ``.str`` accessor is intended to work only on strings. With very few
 exceptions, other uses are not supported, and may be disabled at a later point.
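
A minimal sketch of what the clarified warning means in practice (the exact exception message may vary between versions):

    import pandas as pd

    s = pd.Series([" Column A ", " Column B "])
    s.str.strip().str.lower()  # fine: the inferred type is string

    pd.Series([1, 2]).str  # raises AttributeError: .str only works with string values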

doc/source/whatsnew/v3.0.0.rst

+9-1
@@ -33,7 +33,9 @@ Other enhancements
 - :func:`DataFrame.to_excel` now raises an ``UserWarning`` when the character count in a cell exceeds Excel's limitation of 32767 characters (:issue:`56954`)
 - :func:`read_stata` now returns ``datetime64`` resolutions better matching those natively stored in the stata format (:issue:`55642`)
 - :meth:`DataFrame.agg` called with ``axis=1`` and a ``func`` which relabels the result index now raises a ``NotImplementedError`` (:issue:`58807`).
+- :meth:`Index.get_loc` now accepts also subclasses of ``tuple`` as keys (:issue:`57922`)
 - :meth:`Styler.set_tooltips` provides alternative method to storing tooltips by using title attribute of td elements. (:issue:`56981`)
+- Added missing parameter ``weights`` in :meth:`DataFrame.plot.kde` for the estimation of the PDF (:issue:`59337`)
 - Allow dictionaries to be passed to :meth:`pandas.Series.str.replace` via ``pat`` parameter (:issue:`51748`)
 - Support passing a :class:`Series` input to :func:`json_normalize` that retains the :class:`Series` :class:`Index` (:issue:`51452`)
 - Support reading value labels from Stata 108-format (Stata 6) and earlier files (:issue:`58154`)
@@ -49,6 +51,7 @@ Other enhancements
 - :meth:`Series.cummin` and :meth:`Series.cummax` now supports :class:`CategoricalDtype` (:issue:`52335`)
 - :meth:`Series.plot` now correctly handle the ``ylabel`` parameter for pie charts, allowing for explicit control over the y-axis label (:issue:`58239`)
 - Restore support for reading Stata 104-format and enable reading 103-format dta files (:issue:`58554`)
+- Support reading Stata 102-format (Stata 1) dta files (:issue:`58978`)
 - Support reading Stata 110-format (Stata 7) dta files (:issue:`47176`)
 
 .. ---------------------------------------------------------------------------
@@ -231,8 +234,10 @@ Other API changes
 ^^^^^^^^^^^^^^^^^
 - 3rd party ``py.path`` objects are no longer explicitly supported in IO methods. Use :py:class:`pathlib.Path` objects instead (:issue:`57091`)
 - :func:`read_table`'s ``parse_dates`` argument defaults to ``None`` to improve consistency with :func:`read_csv` (:issue:`57476`)
+- All classes inheriting from builtin ``tuple`` (including types created with :func:`collections.namedtuple`) are now hashed and compared as builtin ``tuple`` during indexing operations (:issue:`57922`)
 - Made ``dtype`` a required argument in :meth:`ExtensionArray._from_sequence_of_strings` (:issue:`56519`)
 - Passing a :class:`Series` input to :func:`json_normalize` will now retain the :class:`Series` :class:`Index`, previously output had a new :class:`RangeIndex` (:issue:`51452`)
+- Removed :meth:`Index.sort` which always raised a ``TypeError``. This attribute is not defined and will raise an ``AttributeError`` (:issue:`59283`)
 - Updated :meth:`DataFrame.to_excel` so that the output spreadsheet has no styling. Custom styling can still be done using :meth:`Styler.to_excel` (:issue:`54154`)
 - pickle and HDF (``.h5``) files created with Python 2 are no longer explicitly supported (:issue:`57387`)
 - pickled objects from pandas version less than ``1.0.0`` are no longer supported (:issue:`57155`)
@@ -280,6 +285,7 @@ Other Deprecations
 - Deprecated allowing non-keyword arguments in :meth:`DataFrame.all`, :meth:`DataFrame.min`, :meth:`DataFrame.max`, :meth:`DataFrame.sum`, :meth:`DataFrame.prod`, :meth:`DataFrame.mean`, :meth:`DataFrame.median`, :meth:`DataFrame.sem`, :meth:`DataFrame.var`, :meth:`DataFrame.std`, :meth:`DataFrame.skew`, :meth:`DataFrame.kurt`, :meth:`Series.all`, :meth:`Series.min`, :meth:`Series.max`, :meth:`Series.sum`, :meth:`Series.prod`, :meth:`Series.mean`, :meth:`Series.median`, :meth:`Series.sem`, :meth:`Series.var`, :meth:`Series.std`, :meth:`Series.skew`, and :meth:`Series.kurt`. (:issue:`57087`)
 - Deprecated allowing non-keyword arguments in :meth:`Series.to_markdown` except ``buf``. (:issue:`57280`)
 - Deprecated allowing non-keyword arguments in :meth:`Series.to_string` except ``buf``. (:issue:`57280`)
+- Deprecated behavior of :meth:`.DataFrameGroupBy.groups` and :meth:`.SeriesGroupBy.groups`, in a future version ``groups`` by one element list will return tuple instead of scalar. (:issue:`58858`)
 - Deprecated behavior of :meth:`Series.dt.to_pytimedelta`, in a future version this will return a :class:`Series` containing python ``datetime.timedelta`` objects instead of an ``ndarray`` of timedelta; this matches the behavior of other :meth:`Series.dt` properties. (:issue:`57463`)
 - Deprecated lowercase strings ``d``, ``b`` and ``c`` denoting frequencies in :class:`Day`, :class:`BusinessDay` and :class:`CustomBusinessDay` in favour of ``D``, ``B`` and ``C`` (:issue:`58998`)
 - Deprecated lowercase strings ``w``, ``w-mon``, ``w-tue``, etc. denoting frequencies in :class:`Week` in favour of ``W``, ``W-MON``, ``W-TUE``, etc. (:issue:`58998`)
@@ -578,10 +584,12 @@ I/O
 - Bug in :meth:`read_excel` raising ``ValueError`` when passing array of boolean values when ``dtype="boolean"``. (:issue:`58159`)
 - Bug in :meth:`read_json` not validating the ``typ`` argument to not be exactly ``"frame"`` or ``"series"`` (:issue:`59124`)
 - Bug in :meth:`read_stata` raising ``KeyError`` when input file is stored in big-endian format and contains strL data. (:issue:`58638`)
+- Bug in :meth:`read_stata` where extreme value integers were incorrectly interpreted as missing for format versions 111 and prior (:issue:`58130`)
+- Bug in :meth:`read_stata` where the missing code for double was not recognised for format versions 105 and prior (:issue:`58149`)
 
 Period
 ^^^^^^
--
+- Fixed error message when passing invalid period alias to :meth:`PeriodIndex.to_timestamp` (:issue:`58974`)
 -
 
 Plotting

pandas/_config/__init__.py

+1-1
@@ -30,6 +30,6 @@
 from pandas._config.display import detect_console_encoding
 
 
-def using_pyarrow_string_dtype() -> bool:
+def using_string_dtype() -> bool:
     _mode_options = _global_config["future"]
     return _mode_options["infer_string"]

pandas/_libs/include/pandas/vendored/klib/khash_python.h

+4-2
@@ -207,7 +207,8 @@ static inline int pyobject_cmp(PyObject *a, PyObject *b) {
   if (PyComplex_CheckExact(a)) {
     return complexobject_cmp((PyComplexObject *)a, (PyComplexObject *)b);
   }
-  if (PyTuple_CheckExact(a)) {
+  if (PyTuple_Check(a)) {
+    // compare tuple subclasses as builtin tuples
     return tupleobject_cmp((PyTupleObject *)a, (PyTupleObject *)b);
   }
   // frozenset isn't yet supported
@@ -311,7 +312,8 @@ static inline khuint32_t kh_python_hash_func(PyObject *key) {
     // because complex(k,0) == k holds for any int-object k
     // and kh_complex128_hash_func doesn't respect it
     hash = complexobject_hash((PyComplexObject *)key);
-  } else if (PyTuple_CheckExact(key)) {
+  } else if (PyTuple_Check(key)) {
+    // hash tuple subclasses as builtin tuples
    hash = tupleobject_hash((PyTupleObject *)key);
  } else {
    hash = PyObject_Hash(key);
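
Switching from PyTuple_CheckExact to PyTuple_Check routes tuple subclasses (for example namedtuples) through the same hashing and comparison code as plain tuples. A user-level sketch of the behaviour this is expected to enable, matching the Index.get_loc note in the v3.0.0 whatsnew above:

    from collections import namedtuple

    import pandas as pd

    Point = namedtuple("Point", ["x", "y"])
    mi = pd.MultiIndex.from_tuples([(1, 2), (3, 4)])

    # A namedtuple key should now resolve to the same location as the equal
    # plain tuple, since both hash and compare as builtin tuples.
    assert mi.get_loc(Point(1, 2)) == mi.get_loc((1, 2)) == 0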

pandas/_libs/lib.pyx

+3-3
@@ -37,7 +37,7 @@ from cython cimport (
     floating,
 )
 
-from pandas._config import using_pyarrow_string_dtype
+from pandas._config import using_string_dtype
 
 from pandas._libs.missing import check_na_tuples_nonequal
 
@@ -2699,10 +2699,10 @@ def maybe_convert_objects(ndarray[object] objects,
             seen.object_ = True
 
         elif seen.str_:
-            if using_pyarrow_string_dtype() and is_string_array(objects, skipna=True):
+            if using_string_dtype() and is_string_array(objects, skipna=True):
                 from pandas.core.arrays.string_ import StringDtype
 
-                dtype = StringDtype(storage="pyarrow_numpy")
+                dtype = StringDtype(storage="pyarrow", na_value=np.nan)
                 return dtype.construct_array_type()._from_sequence(objects, dtype=dtype)
 
         elif convert_to_nullable_dtype and is_string_array(objects, skipna=True):
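
The dtype constructed here is the NaN-backed PyArrow string dtype that replaces the old ``"pyarrow_numpy"`` storage spelling. A rough sketch of the inference this branch is expected to produce once the future option is enabled (behaviour is still evolving on the development branch):

    import pandas as pd

    pd.set_option("future.infer_string", True)

    # Lists/arrays of strings are inferred to the pyarrow-backed string dtype
    # whose missing value is NaN (na_value=np.nan) rather than pd.NA.
    s = pd.Series(["a", "b", None])
    print(s.dtype)  # expected: a pyarrow-backed string dtype
    print(s[2])     # expected: nan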

pandas/_libs/tslibs/nattype.pyx

+7
@@ -441,6 +441,13 @@ class NaTType(_NaT):
 
         Monday == 1 ... Sunday == 7.
 
+        See Also
+        --------
+        Timestamp.weekday : Return the day of the week with Monday=0, Sunday=6.
+        Timestamp.isocalendar : Return a tuple containing ISO year, week number
+            and weekday.
+        datetime.date.isoweekday : Equivalent method in datetime module.
+
         Examples
         --------
         >>> ts = pd.Timestamp('2023-01-01 10:00:00')
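
For quick reference, the cross-referenced methods compare as follows (reusing the docstring's example timestamp; 2023-01-01 is a Sunday):

    import pandas as pd

    ts = pd.Timestamp("2023-01-01 10:00:00")
    print(ts.isoweekday())   # 7  (Monday == 1 ... Sunday == 7)
    print(ts.weekday())      # 6  (Monday == 0 ... Sunday == 6)
    print(ts.isocalendar())  # ISO year, week, weekday -> 2022, 52, 7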
