Commit ec90803

Merge branch 'main' into main
2 parents: e16a327 + 2419343

105 files changed: 1305 additions, 788 deletions


.circleci/config.yml

Lines changed: 12 additions & 2 deletions
@@ -92,7 +92,13 @@ jobs:
           no_output_timeout: 30m # Sometimes the tests won't generate any output, make sure the job doesn't get killed by that
           command: |
             pip3 install cibuildwheel==2.20.0
-            cibuildwheel --output-dir wheelhouse
+            if [[ $CIBW_BUILD == cp313t* ]]; then
+                # TODO: temporarily run 3.13 free threaded builds without build isolation
+                # since we need pre-release cython
+                CIBW_BUILD_FRONTEND="pip; args: --no-build-isolation" cibuildwheel --output-dir wheelhouse
+            else
+                cibuildwheel --output-dir wheelhouse
+            fi

           environment:
             CIBW_BUILD: << parameters.cibw-build >>
@@ -141,6 +147,10 @@ workflows:
               cibw-build: ["cp310-manylinux_aarch64",
                            "cp311-manylinux_aarch64",
                            "cp312-manylinux_aarch64",
+                           "cp313-manylinux_aarch64",
+                           "cp313t-manylinux_aarch64",
                            "cp310-musllinux_aarch64",
                            "cp311-musllinux_aarch64",
-                           "cp312-musllinux_aarch64",]
+                           "cp312-musllinux_aarch64",
+                           "cp313-musllinux_aarch64",
+                           "cp313t-musllinux_aarch64"]

.github/workflows/unit-tests.yml

Lines changed: 1 addition & 1 deletion
@@ -380,7 +380,7 @@ jobs:
           fetch-depth: 0

       - name: Set up Python Free-threading Version
-        uses: deadsnakes/action@v3.1.0
+        uses: deadsnakes/action@v3.2.0
         with:
           python-version: 3.13-dev
           nogil: true

.github/workflows/wheels.yml

Lines changed: 1 addition & 5 deletions
@@ -102,9 +102,7 @@ jobs:
         python: [["cp310", "3.10"], ["cp311", "3.11"], ["cp312", "3.12"], ["cp313", "3.13"], ["cp313t", "3.13"]]
         include:
         # TODO: Remove this plus installing build deps in cibw_before_build.sh
-        # and test deps in cibw_before_test.sh after pandas can be built with a released NumPy/Cython
-        - python: ["cp313", "3.13"]
-          cibw_build_frontend: 'pip; args: --no-build-isolation'
+        # after pandas can be built with a released NumPy/Cython
         - python: ["cp313t", "3.13"]
           cibw_build_frontend: 'pip; args: --no-build-isolation'
         # Build Pyodide wheels and upload them to Anaconda.org
@@ -187,11 +185,9 @@
       - name: Test Windows Wheels
         if: ${{ matrix.buildplat[1] == 'win_amd64' }}
         shell: pwsh
-        # TODO: Remove NumPy nightly install when there's a 3.13 wheel on PyPI
         run: |
           $TST_CMD = @"
           python -m pip install hypothesis>=6.84.0 pytest>=7.3.2 pytest-xdist>=3.4.0;
-          ${{ matrix.python[1] == '3.13' && 'python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy;' }}
          python -m pip install `$(Get-Item pandas\wheelhouse\*.whl);
          python -c `'import pandas as pd; pd.test(extra_args=[`\"--no-strict-data-files`\", `\"-m not clipboard and not single_cpu and not slow and not network and not db`\"])`';
          "@

MANIFEST.in

Lines changed: 0 additions & 1 deletion
@@ -65,4 +65,3 @@ graft pandas/_libs/include

 # Include cibw script in sdist since it's needed for building wheels
 include scripts/cibw_before_build.sh
-include scripts/cibw_before_test.sh

ci/code_checks.sh

Lines changed: 0 additions & 31 deletions
@@ -70,14 +70,10 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         --format=actions \
         -i ES01 `# For now it is ok if docstrings are missing the extended summary` \
         -i "pandas.Series.dt PR01" `# Accessors are implemented as classes, but we do not document the Parameters section` \
-        -i "pandas.NA SA01" \
         -i "pandas.Period.freq GL08" \
         -i "pandas.Period.ordinal GL08" \
-        -i "pandas.PeriodDtype.freq SA01" \
         -i "pandas.RangeIndex.from_range PR01,SA01" \
-        -i "pandas.RangeIndex.start SA01" \
         -i "pandas.RangeIndex.step SA01" \
-        -i "pandas.RangeIndex.stop SA01" \
         -i "pandas.Series.cat.add_categories PR01,PR02" \
         -i "pandas.Series.cat.as_ordered PR01" \
         -i "pandas.Series.cat.as_unordered PR01" \
@@ -92,10 +88,8 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.Series.dt.floor PR01,PR02" \
         -i "pandas.Series.dt.freq GL08" \
         -i "pandas.Series.dt.month_name PR01,PR02" \
-        -i "pandas.Series.dt.nanoseconds SA01" \
         -i "pandas.Series.dt.normalize PR01" \
         -i "pandas.Series.dt.round PR01,PR02" \
-        -i "pandas.Series.dt.seconds SA01" \
         -i "pandas.Series.dt.strftime PR01,PR02" \
         -i "pandas.Series.dt.to_period PR01,PR02" \
         -i "pandas.Series.dt.total_seconds PR01" \
@@ -107,36 +101,22 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.Series.sparse.from_coo PR07,SA01" \
         -i "pandas.Series.sparse.npoints SA01" \
         -i "pandas.Series.sparse.sp_values SA01" \
-        -i "pandas.Timedelta.components SA01" \
         -i "pandas.Timedelta.max PR02" \
         -i "pandas.Timedelta.min PR02" \
         -i "pandas.Timedelta.resolution PR02" \
         -i "pandas.Timedelta.to_timedelta64 SA01" \
-        -i "pandas.Timedelta.total_seconds SA01" \
-        -i "pandas.TimedeltaIndex.nanoseconds SA01" \
-        -i "pandas.TimedeltaIndex.seconds SA01" \
         -i "pandas.TimedeltaIndex.to_pytimedelta RT03,SA01" \
         -i "pandas.Timestamp.nanosecond GL08" \
         -i "pandas.Timestamp.resolution PR02" \
         -i "pandas.Timestamp.tzinfo GL08" \
         -i "pandas.Timestamp.year GL08" \
-        -i "pandas.api.extensions.ExtensionArray.interpolate PR01,SA01" \
-        -i "pandas.api.types.is_bool PR01,SA01" \
-        -i "pandas.api.types.is_categorical_dtype SA01" \
-        -i "pandas.api.types.is_complex PR01,SA01" \
-        -i "pandas.api.types.is_complex_dtype SA01" \
-        -i "pandas.api.types.is_datetime64_dtype SA01" \
-        -i "pandas.api.types.is_datetime64_ns_dtype SA01" \
-        -i "pandas.api.types.is_datetime64tz_dtype SA01" \
         -i "pandas.api.types.is_dict_like PR07,SA01" \
-        -i "pandas.api.types.is_extension_array_dtype SA01" \
         -i "pandas.api.types.is_file_like PR07,SA01" \
         -i "pandas.api.types.is_float PR01,SA01" \
         -i "pandas.api.types.is_float_dtype SA01" \
         -i "pandas.api.types.is_hashable PR01,RT03,SA01" \
         -i "pandas.api.types.is_int64_dtype SA01" \
         -i "pandas.api.types.is_integer PR01,SA01" \
-        -i "pandas.api.types.is_integer_dtype SA01" \
         -i "pandas.api.types.is_interval_dtype SA01" \
         -i "pandas.api.types.is_iterator PR07,SA01" \
         -i "pandas.api.types.is_list_like SA01" \
@@ -148,7 +128,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.arrays.ArrowExtensionArray PR07,SA01" \
         -i "pandas.arrays.BooleanArray SA01" \
         -i "pandas.arrays.DatetimeArray SA01" \
-        -i "pandas.arrays.FloatingArray SA01" \
         -i "pandas.arrays.IntegerArray SA01" \
         -i "pandas.arrays.IntervalArray.left SA01" \
         -i "pandas.arrays.IntervalArray.length SA01" \
@@ -161,35 +140,27 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.core.groupby.DataFrameGroupBy.agg RT03" \
         -i "pandas.core.groupby.DataFrameGroupBy.aggregate RT03" \
         -i "pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01" \
-        -i "pandas.core.groupby.DataFrameGroupBy.filter SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.get_group RT03,SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.groups SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.hist RT03" \
         -i "pandas.core.groupby.DataFrameGroupBy.indices SA01" \
-        -i "pandas.core.groupby.DataFrameGroupBy.max SA01" \
-        -i "pandas.core.groupby.DataFrameGroupBy.min SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.nth PR02" \
         -i "pandas.core.groupby.DataFrameGroupBy.nunique SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.ohlc SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.plot PR02" \
         -i "pandas.core.groupby.DataFrameGroupBy.sem SA01" \
-        -i "pandas.core.groupby.DataFrameGroupBy.sum SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.__iter__ RT03,SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.agg RT03" \
         -i "pandas.core.groupby.SeriesGroupBy.aggregate RT03" \
-        -i "pandas.core.groupby.SeriesGroupBy.filter PR01,SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.get_group RT03,SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.groups SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.indices SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing SA01" \
-        -i "pandas.core.groupby.SeriesGroupBy.max SA01" \
-        -i "pandas.core.groupby.SeriesGroupBy.min SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.nth PR02" \
         -i "pandas.core.groupby.SeriesGroupBy.ohlc SA01" \
         -i "pandas.core.groupby.SeriesGroupBy.plot PR02" \
         -i "pandas.core.groupby.SeriesGroupBy.sem SA01" \
-        -i "pandas.core.groupby.SeriesGroupBy.sum SA01" \
         -i "pandas.core.resample.Resampler.__iter__ RT03,SA01" \
         -i "pandas.core.resample.Resampler.ffill RT03" \
         -i "pandas.core.resample.Resampler.get_group RT03,SA01" \
@@ -218,7 +189,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.errors.IntCastingNaNError SA01" \
         -i "pandas.errors.InvalidIndexError SA01" \
         -i "pandas.errors.InvalidVersion SA01" \
-        -i "pandas.errors.MergeError SA01" \
         -i "pandas.errors.NullFrequencyError SA01" \
         -i "pandas.errors.NumExprClobberingError SA01" \
         -i "pandas.errors.NumbaUtilError SA01" \
@@ -411,7 +381,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.tseries.offsets.Week.n GL08" \
         -i "pandas.tseries.offsets.Week.normalize GL08" \
         -i "pandas.tseries.offsets.Week.weekday GL08" \
-        -i "pandas.tseries.offsets.WeekOfMonth SA01" \
         -i "pandas.tseries.offsets.WeekOfMonth.is_on_offset GL08" \
         -i "pandas.tseries.offsets.WeekOfMonth.n GL08" \
         -i "pandas.tseries.offsets.WeekOfMonth.normalize GL08" \

doc/source/conf.py

Lines changed: 3 additions & 1 deletion
@@ -254,7 +254,9 @@
         "json_url": "https://pandas.pydata.org/versions.json",
         "version_match": switcher_version,
     },
-    "show_version_warning_banner": True,
+    # This shows a warning for patch releases since the
+    # patch version doesn't compare as equal (e.g. 2.2.1 != 2.2.0 but it should be)
+    "show_version_warning_banner": False,
     "icon_links": [
         {
             "name": "Mastodon",

doc/source/whatsnew/index.rst

Lines changed: 1 addition & 0 deletions
@@ -32,6 +32,7 @@ Version 2.2
 .. toctree::
    :maxdepth: 2

+   v2.2.3
    v2.2.2
    v2.2.1
    v2.2.0

doc/source/whatsnew/v2.2.2.rst

Lines changed: 1 addition & 1 deletion
@@ -56,4 +56,4 @@ Other
 Contributors
 ~~~~~~~~~~~~

-.. contributors:: v2.2.1..v2.2.2|HEAD
+.. contributors:: v2.2.1..v2.2.2

doc/source/whatsnew/v2.2.3.rst

Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
+.. _whatsnew_223:
+
+What's new in 2.2.3 (September 20, 2024)
+----------------------------------------
+
+These are the changes in pandas 2.2.3. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_220.py13_compat:
+
+Pandas 2.2.3 is now compatible with Python 3.13
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Pandas 2.2.3 is the first version of pandas that is generally compatible with the upcoming
+Python 3.13, and both wheels for free-threaded and normal Python 3.13 will be uploaded for
+this release.
+
+As usual please report any bugs discovered to our `issue tracker <https://github.com/pandas-dev/pandas/issues/new/choose>`_
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_223.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+- Bug in :func:`eval` on :class:`complex` including division ``/`` discards imaginary part. (:issue:`21374`)
+- Minor fixes for numpy 2.1 compatibility. (:issue:`59444`)
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_223.other:
+
+Other
+~~~~~
+- Missing licenses for 3rd party dependencies were added back into the wheels. (:issue:`58632`)
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_223.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v2.2.2..v2.2.3|HEAD
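To illustrate the first bug-fix entry (issue 21374): division inside eval should now keep the imaginary part of complex operands. A minimal sketch, where the sample values and engine="python" are assumptions rather than part of this commit:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"a": np.array([1 + 2j]), "b": np.array([1 - 1j])})
    # With the fix, division in eval matches plain complex division: (-0.5+1.5j)
    print(df.eval("a / b", engine="python"))
    print(df["a"] / df["b"])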

doc/source/whatsnew/v2.3.0.rst

Lines changed: 3 additions & 1 deletion
@@ -102,9 +102,11 @@ Conversion

 Strings
 ^^^^^^^
+- Bug in :meth:`Series.rank` for :class:`StringDtype` with ``storage="pyarrow"`` incorrectly returning integer results in case of ``method="average"`` and raising an error if it would truncate results (:issue:`59768`)
 - Bug in :meth:`Series.str.replace` when ``n < 0`` for :class:`StringDtype` with ``storage="pyarrow"`` (:issue:`59628`)
+- Bug in ``ser.str.slice`` with negative ``step`` with :class:`ArrowDtype` and :class:`StringDtype` with ``storage="pyarrow"`` giving incorrect results (:issue:`59710`)
 - Bug in the ``center`` method on :class:`Series` and :class:`Index` object ``str`` accessors with pyarrow-backed dtype not matching the python behavior in corner cases with an odd number of fill characters (:issue:`54792`)
-
+-

 Interval
 ^^^^^^^^
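For the str.slice entry added above, a negative step on pyarrow-backed strings should follow ordinary Python slicing semantics. A small sketch (the example value and the string[pyarrow] dtype spelling are assumptions, and pyarrow must be installed):

    import pandas as pd

    s = pd.Series(["abcdef"], dtype="string[pyarrow]")
    # Reversing with a negative step should give "fedcba", just like "abcdef"[::-1]
    print(s.str.slice(step=-1))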

doc/source/whatsnew/v3.0.0.rst

Lines changed: 2 additions & 1 deletion
@@ -55,6 +55,7 @@ Other enhancements
 - :meth:`Series.plot` now correctly handle the ``ylabel`` parameter for pie charts, allowing for explicit control over the y-axis label (:issue:`58239`)
 - :meth:`DataFrame.plot.scatter` argument ``c`` now accepts a column of strings, where rows with the same string are colored identically (:issue:`16827` and :issue:`16485`)
 - :meth:`pandas.concat` will raise a ``ValueError`` when ``ignore_index=True`` and ``keys`` is not ``None`` (:issue:`59274`)
+- :meth:`str.get_dummies` now accepts a ``dtype`` parameter to specify the dtype of the resulting DataFrame (:issue:`47872`)
 - Multiplying two :class:`DateOffset` objects will now raise a ``TypeError`` instead of a ``RecursionError`` (:issue:`59442`)
 - Restore support for reading Stata 104-format and enable reading 103-format dta files (:issue:`58554`)
 - Support passing a :class:`Iterable[Hashable]` input to :meth:`DataFrame.drop_duplicates` (:issue:`59237`)
@@ -626,6 +627,7 @@ I/O
 - Bug in :meth:`read_csv` causing segmentation fault when ``encoding_errors`` is not a string. (:issue:`59059`)
 - Bug in :meth:`read_csv` raising ``TypeError`` when ``index_col`` is specified and ``na_values`` is a dict containing the key ``None``. (:issue:`57547`)
 - Bug in :meth:`read_csv` raising ``TypeError`` when ``nrows`` and ``iterator`` are specified without specifying a ``chunksize``. (:issue:`59079`)
+- Bug in :meth:`read_csv` where the order of the ``na_values`` makes an inconsistency when ``na_values`` is a list non-string values. (:issue:`59303`)
 - Bug in :meth:`read_excel` raising ``ValueError`` when passing array of boolean values when ``dtype="boolean"``. (:issue:`58159`)
 - Bug in :meth:`read_json` not validating the ``typ`` argument to not be exactly ``"frame"`` or ``"series"`` (:issue:`59124`)
 - Bug in :meth:`read_stata` raising ``KeyError`` when input file is stored in big-endian format and contains strL data. (:issue:`58638`)
@@ -691,7 +693,6 @@ Other
 ^^^^^
 - Bug in :class:`DataFrame` when passing a ``dict`` with a NA scalar and ``columns`` that would always return ``np.nan`` (:issue:`57205`)
 - Bug in :func:`eval` on :class:`ExtensionArray` on including division ``/`` failed with a ``TypeError``. (:issue:`58748`)
-- Bug in :func:`eval` on :class:`complex` including division ``/`` discards imaginary part. (:issue:`21374`)
 - Bug in :func:`eval` where the names of the :class:`Series` were not preserved when using ``engine="numexpr"``. (:issue:`10239`)
 - Bug in :func:`unique` on :class:`Index` not always returning :class:`Index` (:issue:`57043`)
 - Bug in :meth:`DataFrame.apply` where passing ``engine="numba"`` ignored ``args`` passed to the applied function (:issue:`58712`)
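For the new dtype parameter on str.get_dummies noted in the enhancements above, a minimal sketch (the separator and the chosen dtype are assumptions for illustration):

    import pandas as pd

    s = pd.Series(["a|b", "a", "b|c"])
    # dtype controls the dtype of the resulting indicator DataFrame (issue 47872)
    print(s.str.get_dummies(sep="|", dtype=bool))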

pandas/_libs/lib.pyx

Lines changed: 37 additions & 6 deletions
@@ -600,6 +600,8 @@ def array_equivalent_object(ndarray left, ndarray right) -> bool:
                 if not array_equivalent(x, y):
                     return False

+            elif PyArray_Check(x) or PyArray_Check(y):
+                return False
             elif (x is C_NA) ^ (y is C_NA):
                 return False
             elif not (
@@ -733,7 +735,9 @@ cpdef ndarray[object] ensure_string_array(
     convert_na_value : bool, default True
         If False, existing na values will be used unchanged in the new array.
     copy : bool, default True
-        Whether to ensure that a new array is returned.
+        Whether to ensure that a new array is returned. When True, a new array
+        is always returned. When False, a new array is only returned when needed
+        to avoid mutating the input array.
     skipna : bool, default True
         Whether or not to coerce nulls to their stringified form
         (e.g. if False, NaN becomes 'nan').
@@ -762,11 +766,15 @@ cpdef ndarray[object] ensure_string_array(

     result = np.asarray(arr, dtype="object")

-    if copy and (result is arr or np.shares_memory(arr, result)):
-        # GH#54654
-        result = result.copy()
-    elif not copy and result is arr:
-        already_copied = False
+    if result is arr or np.may_share_memory(arr, result):
+        # if np.asarray(..) did not make a copy of the input arr, we still need
+        # to do that to avoid mutating the input array
+        # GH#54654: share_memory check is needed for rare cases where np.asarray
+        # returns a new object without making a copy of the actual data
+        if copy:
+            result = result.copy()
+        else:
+            already_copied = False
     elif not copy and not result.flags.writeable:
         # Weird edge case where result is a view
         already_copied = False
@@ -1123,10 +1131,21 @@ def is_bool(obj: object) -> bool:
     """
     Return True if given object is boolean.

+    Parameters
+    ----------
+    obj : object
+        Object to check.
+
     Returns
     -------
     bool

+    See Also
+    --------
+    api.types.is_scalar : Check if the input is a scalar.
+    api.types.is_integer : Check if the input is an integer.
+    api.types.is_float : Check if the input is a float.
+
     Examples
     --------
     >>> pd.api.types.is_bool(True)
@@ -1142,10 +1161,22 @@ def is_complex(obj: object) -> bool:
     """
     Return True if given object is complex.

+    Parameters
+    ----------
+    obj : object
+        Object to check.
+
     Returns
     -------
     bool

+    See Also
+    --------
+    api.types.is_complex_dtype : Check whether the provided array or
+        dtype is of a complex dtype.
+    api.types.is_number : Check if the object is a number.
+    api.types.is_integer : Return True if given object is integer.
+
     Examples
     --------
     >>> pd.api.types.is_complex(1 + 1j)
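The ensure_string_array hunk swaps np.shares_memory for np.may_share_memory. The two NumPy calls trade precision for cost: may_share_memory is a cheap bounds check that can report false positives but never false negatives, while shares_memory performs an exact (and potentially expensive) overlap analysis. A small, pandas-independent illustration of the two calls:

    import numpy as np

    arr = np.arange(10, dtype=object)
    view = arr[::2]

    # Cheap bounds check; may return True even when no element is actually shared.
    print(np.may_share_memory(arr, view))  # True
    # Exact overlap analysis; precise, but can be much slower for tricky strides.
    print(np.shares_memory(arr, view))     # True (these really do overlap)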

pandas/_libs/missing.pyx

Lines changed: 8 additions & 0 deletions
@@ -347,6 +347,14 @@ class NAType(C_NAType):
     The NA singleton is a missing value indicator defined by pandas. It is
     used in certain new extension dtypes (currently the "string" dtype).

+    See Also
+    --------
+    numpy.nan : Floating point representation of Not a Number (NaN) for numerical data.
+    isna : Detect missing values for an array-like object.
+    notna : Detect non-missing values for an array-like object.
+    DataFrame.fillna : Fill missing values in a DataFrame.
+    Series.fillna : Fill missing values in a Series.
+
     Examples
     --------
     >>> pd.NA
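The new See Also entries contrast pd.NA with numpy.nan; a brief sketch of the practical difference (the example values are assumptions):

    import numpy as np
    import pandas as pd

    # pd.NA propagates through comparisons, while np.nan is a float that simply compares unequal
    print(pd.NA == 1)    # <NA>
    print(np.nan == 1)   # False
    # Both are treated as missing by isna
    print(pd.isna(pd.NA), pd.isna(np.nan))  # True True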
