diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml
index aef97c232e940..fd12df0afda65 100644
--- a/ci/deps/actions-311.yaml
+++ b/ci/deps/actions-311.yaml
@@ -18,7 +18,7 @@ dependencies:
 
   # required dependencies
   - python-dateutil
-  - numpy<1.24.0
+  - numpy
   - pytz
 
   # optional dependencies
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index 60ec74553a207..6f31358dabe86 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -9,6 +9,7 @@
 np_version_under1p21 = _nlv < Version("1.21")
 np_version_under1p22 = _nlv < Version("1.22")
 np_version_gte1p22 = _nlv >= Version("1.22")
+np_version_gte1p24 = _nlv >= Version("1.24")
 is_numpy_dev = _nlv.dev is not None
 _min_numpy_ver = "1.20.3"
 
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 8324d4b2618f1..e9ffb9af98323 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -428,10 +428,14 @@ def to_numpy(
                     "for this dtype."
                 )
             # don't pass copy to astype -> always need a copy since we are mutating
-            data = self._data.astype(dtype)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", category=RuntimeWarning)
+                data = self._data.astype(dtype)
             data[self._mask] = na_value
         else:
-            data = self._data.astype(dtype, copy=copy)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", category=RuntimeWarning)
+                data = self._data.astype(dtype, copy=copy)
         return data
 
     @doc(ExtensionArray.tolist)
@@ -464,7 +468,10 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
         # if we are astyping to another nullable masked dtype, we can fastpath
         if isinstance(dtype, BaseMaskedDtype):
             # TODO deal with NaNs for FloatingArray case
-            data = self._data.astype(dtype.numpy_dtype, copy=copy)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", category=RuntimeWarning)
+                # TODO: Is rounding what we want long term?
+                data = self._data.astype(dtype.numpy_dtype, copy=copy)
             # mask is copied depending on whether the data was copied, and
             # not directly depending on the `copy` keyword
             mask = self._mask if data is self._data else self._mask.copy()
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 861c9712cd2ae..395684dfcd6bd 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -7,6 +7,7 @@
     Iterator,
     cast,
 )
+import warnings
 
 import numpy as np
 
@@ -917,11 +918,20 @@ def sequence_to_td64ns(
         mask = np.isnan(data)
         # The next few lines are effectively a vectorized 'cast_from_unit'
         m, p = precision_from_unit(unit or "ns")
-        base = data.astype(np.int64)
+        with warnings.catch_warnings():
+            # Suppress RuntimeWarning about All-NaN slice
+            warnings.filterwarnings(
+                "ignore", "invalid value encountered in cast", RuntimeWarning
+            )
+            base = data.astype(np.int64)
         frac = data - base
         if p:
             frac = np.round(frac, p)
-        data = (base * m + (frac * m).astype(np.int64)).view("timedelta64[ns]")
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore", "invalid value encountered in cast", RuntimeWarning
+            )
+            data = (base * m + (frac * m).astype(np.int64)).view("timedelta64[ns]")
         data[mask] = iNaT
         copy = False
 
diff --git a/pandas/core/dtypes/astype.py b/pandas/core/dtypes/astype.py
index e5b0b5658534f..681151f8c7b5f 100644
--- a/pandas/core/dtypes/astype.py
+++ b/pandas/core/dtypes/astype.py
@@ -9,6 +9,7 @@
     TYPE_CHECKING,
     overload,
 )
+import warnings
 
 import numpy as np
 
@@ -153,7 +154,9 @@ def _astype_float_to_int_nansafe(
         # GH#45151
        if not (values >= 0).all():
             raise ValueError(f"Cannot losslessly cast from {values.dtype} to {dtype}")
-    return values.astype(dtype, copy=copy)
+    with warnings.catch_warnings():
+        warnings.filterwarnings("ignore", category=RuntimeWarning)
+        return values.astype(dtype, copy=copy)
 
 
 def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike:
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 3be89f6da2bd8..659da41acb1c4 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -398,7 +398,11 @@ def trans(x):
         and not is_bool_dtype(result.dtype)
         and not is_string_dtype(result.dtype)
     ):
-        new_result = result.astype(dtype)
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore", "overflow encountered in cast", RuntimeWarning
+            )
+            new_result = result.astype(dtype)
 
         # Adjust tolerances based on floating point size
         size_tols = {4: 5e-4, 8: 5e-8, 16: 5e-16}
@@ -1606,7 +1610,9 @@ def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray:
                 )
                 casted = np.array(arr, dtype=dtype, copy=False)
         else:
-            casted = arr.astype(dtype, copy=False)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", category=RuntimeWarning)
+                casted = arr.astype(dtype, copy=False)
     except OverflowError as err:
         raise OverflowError(
             "The elements provided in the data cannot all be "
@@ -1618,7 +1624,7 @@ def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray:
         return casted
 
     with warnings.catch_warnings():
-        warnings.filterwarnings("ignore")
+        warnings.filterwarnings("ignore", category=RuntimeWarning)
         if np.array_equal(arr, casted):
             return casted
 
@@ -1821,7 +1827,9 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any:
                 # see test_where_complex GH#6345
                 return dtype.type(element)
 
-            casted = dtype.type(element)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore")
+                casted = dtype.type(element)
             if casted == element:
                 return casted
             # otherwise e.g. overflow see test_32878_complex_itemsize
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 0caa8005f1ebc..e427442a1f6fb 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3872,9 +3872,15 @@ def _get_fill_indexer(
             # but that doesn't appear to be enforced
             # error: "IndexEngine" has no attribute "get_indexer_with_fill"
             engine = self._engine
-            return engine.get_indexer_with_fill(  # type: ignore[union-attr]
-                target=target._values, values=self._values, method=method, limit=limit
-            )
+            with warnings.catch_warnings():
+                # TODO: We need to fix this. Casting to int64 in cython
+                warnings.filterwarnings("ignore", category=RuntimeWarning)
+                return engine.get_indexer_with_fill(  # type: ignore[union-attr]
+                    target=target._values,
+                    values=self._values,
+                    method=method,
+                    limit=limit,
+                )
 
         if self.is_monotonic_increasing and target.is_monotonic_increasing:
             target_values = target._get_engine_target()
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index b51846e34fd88..923dc24240f61 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -17,10 +17,13 @@
 import pytest
 
 from pandas._libs import iNaT
+from pandas.compat import is_platform_windows
+from pandas.compat.numpy import np_version_gte1p24
 from pandas.core.dtypes.dtypes import PeriodDtype
 
 import pandas as pd
+import pandas._testing as tm
 from pandas.core.arrays import PeriodArray
 from pandas.tests.extension import base
 
 
@@ -94,6 +97,14 @@ def test_combine_add(self, data_repeated):
         # Period + Period is not defined.
         pass
 
+    @pytest.mark.parametrize("periods", [1, -2])
+    def test_diff(self, data, periods):
+        if is_platform_windows() and np_version_gte1p24:
+            with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False):
+                super().test_diff(data, periods)
+        else:
+            super().test_diff(data, periods)
+
 
 class TestInterface(BasePeriodTests, base.BaseInterfaceTests):
 
diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py
index d5a7610ecb8a9..4c6bc3583676a 100644
--- a/pandas/tests/io/parser/test_c_parser_only.py
+++ b/pandas/tests/io/parser/test_c_parser_only.py
@@ -21,6 +21,7 @@
     IS64,
     is_ci_environment,
 )
+from pandas.compat.numpy import np_version_gte1p24
 from pandas.errors import ParserError
 import pandas.util._test_decorators as td
 
@@ -114,14 +115,16 @@ def test_dtype_and_names_error(c_parser_only):
 3.0 3
 """
     # fallback casting, but not castable
+    warning = RuntimeWarning if np_version_gte1p24 else None
     with pytest.raises(ValueError, match="cannot safely convert"):
-        parser.read_csv(
-            StringIO(data),
-            sep=r"\s+",
-            header=None,
-            names=["a", "b"],
-            dtype={"a": np.int32},
-        )
+        with tm.assert_produces_warning(warning, check_stacklevel=False):
+            parser.read_csv(
+                StringIO(data),
+                sep=r"\s+",
+                header=None,
+                names=["a", "b"],
+                dtype={"a": np.int32},
+            )
 
 
 @pytest.mark.parametrize(
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 49c5b78a48a9f..57ad94102cb55 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -804,7 +804,7 @@ def test_constructor_floating_data_int_dtype(self, frame_or_series):
         # Long-standing behavior (for Series, new in 2.0 for DataFrame)
         # has been to ignore the dtype on these;
         # not clear if this is what we want long-term
-        expected = frame_or_series(arr)
+        # expected = frame_or_series(arr)
 
         # GH#49599 as of 2.0 we raise instead of silently retaining float dtype
         msg = "Trying to coerce float values to integer"
@@ -816,7 +816,7 @@
 
         # pre-2.0, when we had NaNs, we silently ignored the integer dtype
        arr[0] = np.nan
-        expected = frame_or_series(arr)
+        # expected = frame_or_series(arr)
 
         msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
         with pytest.raises(IntCastingNaNError, match=msg):
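
Background note (illustration only, not part of the patch above): starting with NumPy 1.24, casting NaN, inf, or out-of-range floats to an integer dtype emits a RuntimeWarning such as "invalid value encountered in cast" or "overflow encountered in cast". The diff silences those warnings locally around each affected astype call and asserts them in tests on the platforms where they fire. The minimal sketch below shows that pattern; the array values, the variable names, and the -1 sentinel are made up for this sketch, whereas pandas itself overwrites the masked slots with iNaT or the requested na_value.

import warnings

import numpy as np

values = np.array([np.nan, 1.5, 3.0])

# On NumPy >= 1.24 this cast emits "RuntimeWarning: invalid value encountered
# in cast"; older versions cast silently. Suppress it only around the cast.
with warnings.catch_warnings():
    warnings.filterwarnings(
        "ignore", "invalid value encountered in cast", RuntimeWarning
    )
    as_int = values.astype(np.int64)  # the NaN slot holds an arbitrary integer

# The garbage slot is then overwritten via the NaN mask, analogous to the
# data[mask] = iNaT / data[self._mask] = na_value lines in the patch.
as_int[np.isnan(values)] = -1  # hypothetical sentinel, for illustration only
print(as_int)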