diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py index fd1770df8e5d3..23286343d7367 100644 --- a/asv_bench/benchmarks/pandas_vb_common.py +++ b/asv_bench/benchmarks/pandas_vb_common.py @@ -33,7 +33,7 @@ np.uint8, ] datetime_dtypes = [np.datetime64, np.timedelta64] -string_dtypes = [np.object] +string_dtypes = [object] try: extension_dtypes = [ pd.Int8Dtype, diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py index d78419c12ce0d..258c29c145721 100644 --- a/asv_bench/benchmarks/series_methods.py +++ b/asv_bench/benchmarks/series_methods.py @@ -58,17 +58,15 @@ def time_isin_nan_values(self): class IsInForObjects: def setup(self): - self.s_nans = Series(np.full(10 ** 4, np.nan)).astype(np.object) - self.vals_nans = np.full(10 ** 4, np.nan).astype(np.object) - self.s_short = Series(np.arange(2)).astype(np.object) - self.s_long = Series(np.arange(10 ** 5)).astype(np.object) - self.vals_short = np.arange(2).astype(np.object) - self.vals_long = np.arange(10 ** 5).astype(np.object) + self.s_nans = Series(np.full(10 ** 4, np.nan)).astype(object) + self.vals_nans = np.full(10 ** 4, np.nan).astype(object) + self.s_short = Series(np.arange(2)).astype(object) + self.s_long = Series(np.arange(10 ** 5)).astype(object) + self.vals_short = np.arange(2).astype(object) + self.vals_long = np.arange(10 ** 5).astype(object) # because of nans floats are special: - self.s_long_floats = Series(np.arange(10 ** 5, dtype=np.float)).astype( - np.object - ) - self.vals_long_floats = np.arange(10 ** 5, dtype=np.float).astype(np.object) + self.s_long_floats = Series(np.arange(10 ** 5, dtype=np.float)).astype(object) + self.vals_long_floats = np.arange(10 ** 5, dtype=np.float).astype(object) def time_isin_nans(self): # if nan-objects are different objects, diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py index d6aa41a7e0f32..28ceb25eebd96 100644 --- a/asv_bench/benchmarks/sparse.py +++ b/asv_bench/benchmarks/sparse.py @@ -32,7 +32,7 @@ def time_series_to_frame(self): class SparseArrayConstructor: - params = ([0.1, 0.01], [0, np.nan], [np.int64, np.float64, np.object]) + params = ([0.1, 0.01], [0, np.nan], [np.int64, np.float64, object]) param_names = ["dense_proportion", "fill_value", "dtype"] def setup(self, dense_proportion, fill_value, dtype): diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index df6b44ac654ce..d4be9d802d697 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -1884,7 +1884,7 @@ Fallback behavior If the JSON serializer cannot handle the container contents directly it will fall back in the following manner: -* if the dtype is unsupported (e.g. ``np.complex``) then the ``default_handler``, if provided, will be called +* if the dtype is unsupported (e.g. ``np.complex_``) then the ``default_handler``, if provided, will be called for each value, otherwise an exception is raised. 
* if an object is unsupported it will attempt the following: diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index ad65f9707610b..e0e026fe7cb5e 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -178,7 +178,7 @@ cdef class StringVector: Py_ssize_t n object val - ao = np.empty(self.data.n, dtype=np.object) + ao = np.empty(self.data.n, dtype=object) for i in range(self.data.n): val = self.data.data[i] ao[i] = val diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in index 326ae36c6a12c..0cc0a6b192df5 100644 --- a/pandas/_libs/hashtable_func_helper.pxi.in +++ b/pandas/_libs/hashtable_func_helper.pxi.in @@ -94,7 +94,7 @@ cpdef value_count_{{dtype}}({{c_type}}[:] values, bint dropna): build_count_table_{{dtype}}(values, table, dropna) {{endif}} - result_keys = np.empty(table.n_occupied, dtype=np.{{dtype}}) + result_keys = np.empty(table.n_occupied, '{{dtype}}') result_counts = np.zeros(table.n_occupied, dtype=np.int64) {{if dtype == 'object'}} diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 461419239c730..6ffb036e01595 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -2037,7 +2037,7 @@ def _concatenate_chunks(list chunks): numpy_dtypes = {x for x in dtypes if not is_categorical_dtype(x)} if len(numpy_dtypes) > 1: common_type = np.find_common_type(numpy_dtypes, []) - if common_type == np.object: + if common_type == object: warning_columns.append(str(name)) dtype = dtypes.pop() diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx index d853ddf3de7d4..7c9575d921dc9 100644 --- a/pandas/_libs/sparse.pyx +++ b/pandas/_libs/sparse.pyx @@ -791,4 +791,4 @@ def make_mask_object_ndarray(ndarray[object, ndim=1] arr, object fill_value): if value == fill_value and type(value) == type(fill_value): mask[i] = 0 - return mask.view(dtype=np.bool) + return mask.view(dtype=bool) diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx index 9d3959d0a070a..ca18afebf410b 100644 --- a/pandas/_libs/testing.pyx +++ b/pandas/_libs/testing.pyx @@ -11,7 +11,7 @@ cdef NUMERIC_TYPES = ( bool, int, float, - np.bool, + np.bool_, np.int8, np.int16, np.int32, diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index d270a6431be56..dcf2015245518 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -171,7 +171,7 @@ def _ensure_data( return values, dtype # we have failed, return object - values = np.asarray(values, dtype=np.object) + values = np.asarray(values, dtype=object) return ensure_object(values), np.dtype("object") diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 9b89ec99e8df6..4996a10002c63 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -150,7 +150,7 @@ def _sparse_array_op( # to make template simple, cast here left_sp_values = left.sp_values.view(np.uint8) right_sp_values = right.sp_values.view(np.uint8) - result_dtype = np.bool + result_dtype = bool else: opname = f"sparse_{name}_{dtype}" left_sp_values = left.sp_values @@ -183,7 +183,7 @@ def _wrap_result(name, data, sparse_index, fill_value, dtype=None): name = name[2:-2] if name in ("eq", "ne", "lt", "gt", "le", "ge"): - dtype = np.bool + dtype = bool fill_value = lib.item_from_zerodim(fill_value) diff --git a/pandas/core/base.py b/pandas/core/base.py index bb1afc8f8ef20..e790b1d7f106e 100644 --- a/pandas/core/base.py +++ 
b/pandas/core/base.py @@ -1520,7 +1520,7 @@ def drop_duplicates(self, keep="first"): def duplicated(self, keep="first"): if isinstance(self, ABCIndexClass): if self.is_unique: - return np.zeros(len(self), dtype=np.bool) + return np.zeros(len(self), dtype=bool) return duplicated(self, keep=keep) else: return self._constructor( diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 2a47a03b8d387..e69e3bab10af8 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -225,7 +225,7 @@ def trans(x): # if we have any nulls, then we are done return result - elif not isinstance(r[0], (np.integer, np.floating, np.bool, int, float, bool)): + elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)): # a comparable, e.g. a Decimal may slip in here return result @@ -315,7 +315,7 @@ def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj: from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import Int64Dtype - if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(np.bool)): + if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)): return np.dtype(np.int64) elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype): return Int64Dtype() @@ -597,7 +597,7 @@ def _ensure_dtype_type(value, dtype): """ Ensure that the given value is an instance of the given dtype. - e.g. if out dtype is np.complex64, we should have an instance of that + e.g. if out dtype is np.complex64, we should have an instance of that as opposed to a python complex object. Parameters @@ -1483,7 +1483,7 @@ def find_common_type(types: List[DtypeObj]) -> DtypeObj: if has_bools: for t in types: if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t): - return np.object + return object return np.find_common_type(types, []) @@ -1742,7 +1742,7 @@ def validate_numeric_casting(dtype: np.dtype, value): if is_float(value) and np.isnan(value): raise ValueError("Cannot assign nan to integer series") - if issubclass(dtype.type, (np.integer, np.floating, np.complex)) and not issubclass( + if issubclass(dtype.type, (np.integer, np.floating, complex)) and not issubclass( dtype.type, np.bool_ ): if is_bool(value): diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index a4a5ae1bfefff..9e960375e9bf4 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1354,7 +1354,7 @@ def is_bool_dtype(arr_or_dtype) -> bool: False >>> is_bool_dtype(bool) True - >>> is_bool_dtype(np.bool) + >>> is_bool_dtype(np.bool_) True >>> is_bool_dtype(np.array(['a', 'b'])) False @@ -1526,7 +1526,7 @@ def is_complex_dtype(arr_or_dtype) -> bool: False >>> is_complex_dtype(int) False - >>> is_complex_dtype(np.complex) + >>> is_complex_dtype(np.complex_) True >>> is_complex_dtype(np.array(['a', 'b'])) False diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9014e576eeb39..26770efb5c9f9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10024,7 +10024,7 @@ def describe( Including only string columns in a ``DataFrame`` description. - >>> df.describe(include=[np.object]) # doctest: +SKIP + >>> df.describe(include=[object]) # doctest: +SKIP object count 3 unique 3 @@ -10051,7 +10051,7 @@ def describe( Excluding object columns from a ``DataFrame`` description.
- >>> df.describe(exclude=[np.object]) # doctest: +SKIP + >>> df.describe(exclude=[object]) # doctest: +SKIP categorical numeric count 3 3.0 unique 3 NaN diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 904049923859d..48fdb14ebe90c 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1267,9 +1267,9 @@ def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]: if is_object_dtype(vals): vals = np.array([bool(x) for x in vals]) else: - vals = vals.astype(np.bool) + vals = vals.astype(bool) - return vals.view(np.uint8), np.bool + return vals.view(np.uint8), bool def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray: return result.astype(inference, copy=False) @@ -2059,7 +2059,7 @@ def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]: vals = vals.to_numpy(dtype=float, na_value=np.nan) elif is_datetime64_dtype(vals.dtype): inference = "datetime64[ns]" - vals = np.asarray(vals).astype(np.float) + vals = np.asarray(vals).astype(float) return vals, inference diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c046d6465ce67..057adceda7efd 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -374,7 +374,7 @@ def __new__( return UInt64Index(data, copy=copy, dtype=dtype, name=name) elif is_float_dtype(data.dtype): return Float64Index(data, copy=copy, dtype=dtype, name=name) - elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data): + elif issubclass(data.dtype.type, bool) or is_bool_dtype(data): subarr = data.astype("object") else: subarr = com.asarray_tuplesafe(data, dtype=object) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index e496694ee7899..eaf59051205d6 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1951,7 +1951,7 @@ def _check_comparison_types( if isinstance(result, np.ndarray): # The shape of the mask can differ to that of the result # since we may compare only a subset of a's or b's elements - tmp = np.zeros(mask.shape, dtype=np.bool) + tmp = np.zeros(mask.shape, dtype=np.bool_) tmp[mask] = result result = tmp diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index 1d6e02254e44a..1b56b6d5a46fa 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -264,7 +264,7 @@ def hash_array( # First, turn whatever array this is into unsigned 64-bit ints, if we can # manage it. - elif isinstance(dtype, np.bool): + elif isinstance(dtype, bool): vals = vals.astype("u8") elif issubclass(dtype.type, (np.datetime64, np.timedelta64)): vals = vals.view("i8").astype("u8", copy=False) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index c54e264faedd2..679cf4c2d8929 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -3476,13 +3476,13 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None): # This will enable us to write `dtype[col_name]` # without worrying about KeyError issues later on. if not isinstance(dtype, dict): - # if dtype == None, default will be np.object. - default_dtype = dtype or np.object + # if dtype == None, default will be object. + default_dtype = dtype or object dtype = defaultdict(lambda: default_dtype) else: # Save a copy of the dictionary. _dtype = dtype.copy() - dtype = defaultdict(lambda: np.object) + dtype = defaultdict(lambda: object) # Convert column indexes to column names. 
for k, v in _dtype.items(): diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index c8f1336bcec60..3d9be7c15726b 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -685,7 +685,7 @@ def read(self, nrows=None): nd = self._column_types.count(b"d") ns = self._column_types.count(b"s") - self._string_chunk = np.empty((ns, nrows), dtype=np.object) + self._string_chunk = np.empty((ns, nrows), dtype=object) self._byte_chunk = np.zeros((nd, 8 * nrows), dtype=np.uint8) self._current_row_in_chunk_index = 0 diff --git a/pandas/io/stata.py b/pandas/io/stata.py index e9adf5292ef6f..7677d8a94d521 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -322,7 +322,7 @@ def convert_delta_safe(base, deltas, unit) -> Series: elif fmt.startswith(("%tC", "tC")): warnings.warn("Encountered %tC format. Leaving in Stata Internal Format.") - conv_dates = Series(dates, dtype=np.object) + conv_dates = Series(dates, dtype=object) if has_bad_values: conv_dates[bad_locs] = NaT return conv_dates @@ -451,7 +451,7 @@ def g(x: datetime.datetime) -> int: conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3 elif fmt in ["%th", "th"]: d = parse_dates_safe(dates, year=True) - conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(np.int) + conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(int) elif fmt in ["%ty", "ty"]: d = parse_dates_safe(dates, year=True) conv_dates = d.year @@ -553,7 +553,7 @@ def _cast_to_stata_types(data: DataFrame) -> DataFrame: ws = "" # original, if small, if large conversion_data = ( - (np.bool, np.int8, np.int8), + (np.bool_, np.int8, np.int8), (np.uint8, np.int8, np.int16), (np.uint16, np.int16, np.int32), (np.uint32, np.int32, np.int64), @@ -1725,7 +1725,7 @@ def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFra if convert_missing: # Replacement follows Stata notation missing_loc = np.nonzero(np.asarray(missing))[0] umissing, umissing_loc = np.unique(series[missing], return_inverse=True) - replacement = Series(series, dtype=np.object) + replacement = Series(series, dtype=object) for j, um in enumerate(umissing): missing_value = StataMissingValue(um) diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index ef8376bfef8a9..caf2f27de9276 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -301,7 +301,7 @@ def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey): try: # first find out the ax layout, # so that we can correctly handle 'gaps" - layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool) + layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool_) for ax in axarr: layout[row_num(ax), col_num(ax)] = ax.get_visible() diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index ccd03e841a40d..6c7b989bb9f2e 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -457,27 +457,27 @@ def test_pi_comp_period(self): ) f = lambda x: x == pd.Period("2011-03", freq="M") - exp = np.array([False, False, True, False], dtype=np.bool) + exp = np.array([False, False, True, False], dtype=np.bool_) self._check(idx, f, exp) f = lambda x: pd.Period("2011-03", freq="M") == x self._check(idx, f, exp) f = lambda x: x != pd.Period("2011-03", freq="M") - exp = np.array([True, True, False, True], dtype=np.bool) + exp = np.array([True, True, False, True], dtype=np.bool_) self._check(idx, f, exp) f = lambda x: pd.Period("2011-03", 
freq="M") != x self._check(idx, f, exp) f = lambda x: pd.Period("2011-03", freq="M") >= x - exp = np.array([True, True, True, False], dtype=np.bool) + exp = np.array([True, True, True, False], dtype=np.bool_) self._check(idx, f, exp) f = lambda x: x > pd.Period("2011-03", freq="M") - exp = np.array([False, False, False, True], dtype=np.bool) + exp = np.array([False, False, False, True], dtype=np.bool_) self._check(idx, f, exp) f = lambda x: pd.Period("2011-03", freq="M") >= x - exp = np.array([True, True, True, False], dtype=np.bool) + exp = np.array([True, True, True, False], dtype=np.bool_) self._check(idx, f, exp) def test_pi_comp_period_nat(self): @@ -486,43 +486,43 @@ def test_pi_comp_period_nat(self): ) f = lambda x: x == pd.Period("2011-03", freq="M") - exp = np.array([False, False, True, False], dtype=np.bool) + exp = np.array([False, False, True, False], dtype=np.bool_) self._check(idx, f, exp) f = lambda x: pd.Period("2011-03", freq="M") == x self._check(idx, f, exp) f = lambda x: x == pd.NaT - exp = np.array([False, False, False, False], dtype=np.bool) + exp = np.array([False, False, False, False], dtype=np.bool_) self._check(idx, f, exp) f = lambda x: pd.NaT == x self._check(idx, f, exp) f = lambda x: x != pd.Period("2011-03", freq="M") - exp = np.array([True, True, False, True], dtype=np.bool) + exp = np.array([True, True, False, True], dtype=np.bool_) self._check(idx, f, exp) f = lambda x: pd.Period("2011-03", freq="M") != x self._check(idx, f, exp) f = lambda x: x != pd.NaT - exp = np.array([True, True, True, True], dtype=np.bool) + exp = np.array([True, True, True, True], dtype=np.bool_) self._check(idx, f, exp) f = lambda x: pd.NaT != x self._check(idx, f, exp) f = lambda x: pd.Period("2011-03", freq="M") >= x - exp = np.array([True, False, True, False], dtype=np.bool) + exp = np.array([True, False, True, False], dtype=np.bool_) self._check(idx, f, exp) f = lambda x: x < pd.Period("2011-03", freq="M") - exp = np.array([True, False, False, False], dtype=np.bool) + exp = np.array([True, False, False, False], dtype=np.bool_) self._check(idx, f, exp) f = lambda x: x > pd.NaT - exp = np.array([False, False, False, False], dtype=np.bool) + exp = np.array([False, False, False, False], dtype=np.bool_) self._check(idx, f, exp) f = lambda x: pd.NaT >= x - exp = np.array([False, False, False, False], dtype=np.bool) + exp = np.array([False, False, False, False], dtype=np.bool_) self._check(idx, f, exp) diff --git a/pandas/tests/arrays/boolean/test_logical.py b/pandas/tests/arrays/boolean/test_logical.py index bf4775bbd7b32..e79262e1b7934 100644 --- a/pandas/tests/arrays/boolean/test_logical.py +++ b/pandas/tests/arrays/boolean/test_logical.py @@ -14,8 +14,8 @@ def test_numpy_scalars_ok(self, all_logical_operators): a = pd.array([True, False, None], dtype="boolean") op = getattr(a, all_logical_operators) - tm.assert_extension_array_equal(op(True), op(np.bool(True))) - tm.assert_extension_array_equal(op(False), op(np.bool(False))) + tm.assert_extension_array_equal(op(True), op(np.bool_(True))) + tm.assert_extension_array_equal(op(False), op(np.bool_(False))) def get_op_from_name(self, op_name): short_opname = op_name.strip("_") diff --git a/pandas/tests/arrays/categorical/test_dtypes.py b/pandas/tests/arrays/categorical/test_dtypes.py index 9922a8863ebc2..47ce9cb4089f9 100644 --- a/pandas/tests/arrays/categorical/test_dtypes.py +++ b/pandas/tests/arrays/categorical/test_dtypes.py @@ -127,11 +127,11 @@ def test_astype(self, ordered): tm.assert_numpy_array_equal(result, expected) result = 
cat.astype(int) - expected = np.array(cat, dtype=np.int) + expected = np.array(cat, dtype=int) tm.assert_numpy_array_equal(result, expected) result = cat.astype(float) - expected = np.array(cat, dtype=np.float) + expected = np.array(cat, dtype=float) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize("dtype_ordered", [True, False]) diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py index 4ae1c1e6b63ce..c9f1dd7f589fc 100644 --- a/pandas/tests/arrays/sparse/test_arithmetics.py +++ b/pandas/tests/arrays/sparse/test_arithmetics.py @@ -53,7 +53,7 @@ def _check_numeric_ops(self, a, b, a_dense, b_dense, mix, op): def _check_bool_result(self, res): assert isinstance(res, self._klass) assert isinstance(res.dtype, SparseDtype) - assert res.dtype.subtype == np.bool + assert res.dtype.subtype == np.bool_ assert isinstance(res.fill_value, bool) def _check_comparison_ops(self, a, b, a_dense, b_dense): @@ -306,22 +306,22 @@ def test_int_array_comparison(self, kind): def test_bool_same_index(self, kind, fill_value): # GH 14000 # when sp_index are the same - values = self._base([True, False, True, True], dtype=np.bool) - rvalues = self._base([True, False, True, True], dtype=np.bool) + values = self._base([True, False, True, True], dtype=np.bool_) + rvalues = self._base([True, False, True, True], dtype=np.bool_) - a = self._klass(values, kind=kind, dtype=np.bool, fill_value=fill_value) - b = self._klass(rvalues, kind=kind, dtype=np.bool, fill_value=fill_value) + a = self._klass(values, kind=kind, dtype=np.bool_, fill_value=fill_value) + b = self._klass(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value) self._check_logical_ops(a, b, values, rvalues) @pytest.mark.parametrize("fill_value", [True, False, np.nan]) def test_bool_array_logical(self, kind, fill_value): # GH 14000 # when sp_index are the same - values = self._base([True, False, True, False, True, True], dtype=np.bool) - rvalues = self._base([True, False, False, True, False, True], dtype=np.bool) + values = self._base([True, False, True, False, True, True], dtype=np.bool_) + rvalues = self._base([True, False, False, True, False, True], dtype=np.bool_) - a = self._klass(values, kind=kind, dtype=np.bool, fill_value=fill_value) - b = self._klass(rvalues, kind=kind, dtype=np.bool, fill_value=fill_value) + a = self._klass(values, kind=kind, dtype=np.bool_, fill_value=fill_value) + b = self._klass(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value) self._check_logical_ops(a, b, values, rvalues) def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions): diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index 8450253f853c3..2f2907fbaaebc 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -74,22 +74,22 @@ def test_constructor_sparse_dtype_str(self): def test_constructor_object_dtype(self): # GH 11856 - arr = SparseArray(["A", "A", np.nan, "B"], dtype=np.object) - assert arr.dtype == SparseDtype(np.object) + arr = SparseArray(["A", "A", np.nan, "B"], dtype=object) + assert arr.dtype == SparseDtype(object) assert np.isnan(arr.fill_value) - arr = SparseArray(["A", "A", np.nan, "B"], dtype=np.object, fill_value="A") - assert arr.dtype == SparseDtype(np.object, "A") + arr = SparseArray(["A", "A", np.nan, "B"], dtype=object, fill_value="A") + assert arr.dtype == SparseDtype(object, "A") assert arr.fill_value == "A" # GH 17574 data = [False, 0, 100.0, 0.0] - 
arr = SparseArray(data, dtype=np.object, fill_value=False) - assert arr.dtype == SparseDtype(np.object, False) + arr = SparseArray(data, dtype=object, fill_value=False) + assert arr.dtype == SparseDtype(object, False) assert arr.fill_value is False - arr_expected = np.array(data, dtype=np.object) + arr_expected = np.array(data, dtype=object) it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected)) - assert np.fromiter(it, dtype=np.bool).all() + assert np.fromiter(it, dtype=np.bool_).all() @pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int]) def test_constructor_na_dtype(self, dtype): @@ -445,15 +445,15 @@ def test_constructor_bool(self): def test_constructor_bool_fill_value(self): arr = SparseArray([True, False, True], dtype=None) - assert arr.dtype == SparseDtype(np.bool) + assert arr.dtype == SparseDtype(np.bool_) assert not arr.fill_value - arr = SparseArray([True, False, True], dtype=np.bool) - assert arr.dtype == SparseDtype(np.bool) + arr = SparseArray([True, False, True], dtype=np.bool_) + assert arr.dtype == SparseDtype(np.bool_) assert not arr.fill_value - arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True) - assert arr.dtype == SparseDtype(np.bool, True) + arr = SparseArray([True, False, True], dtype=np.bool_, fill_value=True) + assert arr.dtype == SparseDtype(np.bool_, True) assert arr.fill_value def test_constructor_float32(self): @@ -588,7 +588,7 @@ def test_set_fill_value(self): arr.fill_value = np.nan assert np.isnan(arr.fill_value) - arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool) + arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool_) arr.fill_value = True assert arr.fill_value @@ -605,7 +605,7 @@ def test_set_fill_value(self): @pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)]) def test_set_fill_invalid_non_scalar(self, val): - arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool) + arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool_) msg = "fill_value must be a scalar" with pytest.raises(ValueError, match=msg): @@ -625,7 +625,7 @@ def test_values_asarray(self): ([0, 0, 0, 0, 0], (5,), None), ([], (0,), None), ([0], (1,), None), - (["A", "A", np.nan, "B"], (4,), np.object), + (["A", "A", np.nan, "B"], (4,), object), ], ) def test_shape(self, data, shape, dtype): diff --git a/pandas/tests/dtypes/cast/test_find_common_type.py b/pandas/tests/dtypes/cast/test_find_common_type.py index ac7a5221d3469..8dac92f469703 100644 --- a/pandas/tests/dtypes/cast/test_find_common_type.py +++ b/pandas/tests/dtypes/cast/test_find_common_type.py @@ -11,7 +11,7 @@ ((np.int64,), np.int64), ((np.uint64,), np.uint64), ((np.float32,), np.float32), - ((np.object,), np.object), + ((object,), object), # Into ints. ((np.int16, np.int64), np.int64), ((np.int32, np.uint32), np.int64), @@ -25,20 +25,20 @@ ((np.float16, np.int64), np.float64), # Into others. ((np.complex128, np.int32), np.complex128), - ((np.object, np.float32), np.object), - ((np.object, np.int16), np.object), + ((object, np.float32), object), + ((object, np.int16), object), # Bool with int. 
- ((np.dtype("bool"), np.int64), np.object), - ((np.dtype("bool"), np.int32), np.object), - ((np.dtype("bool"), np.int16), np.object), - ((np.dtype("bool"), np.int8), np.object), - ((np.dtype("bool"), np.uint64), np.object), - ((np.dtype("bool"), np.uint32), np.object), - ((np.dtype("bool"), np.uint16), np.object), - ((np.dtype("bool"), np.uint8), np.object), + ((np.dtype("bool"), np.int64), object), + ((np.dtype("bool"), np.int32), object), + ((np.dtype("bool"), np.int16), object), + ((np.dtype("bool"), np.int8), object), + ((np.dtype("bool"), np.uint64), object), + ((np.dtype("bool"), np.uint32), object), + ((np.dtype("bool"), np.uint16), object), + ((np.dtype("bool"), np.uint8), object), # Bool with float. - ((np.dtype("bool"), np.float64), np.object), - ((np.dtype("bool"), np.float32), np.object), + ((np.dtype("bool"), np.float64), object), + ((np.dtype("bool"), np.float32), object), ( (np.dtype("datetime64[ns]"), np.dtype("datetime64[ns]")), np.dtype("datetime64[ns]"), @@ -55,8 +55,8 @@ (np.dtype("timedelta64[ms]"), np.dtype("timedelta64[ns]")), np.dtype("timedelta64[ns]"), ), - ((np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")), np.object), - ((np.dtype("datetime64[ns]"), np.int64), np.object), + ((np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")), object), + ((np.dtype("datetime64[ns]"), np.int64), object), ], ) def test_numpy_dtypes(source_dtypes, expected_common_dtype): @@ -72,7 +72,7 @@ def test_raises_empty_input(): "dtypes,exp_type", [ ([CategoricalDtype()], "category"), - ([np.object, CategoricalDtype()], np.object), + ([object, CategoricalDtype()], object), ([CategoricalDtype(), CategoricalDtype()], "category"), ], ) @@ -90,14 +90,14 @@ def test_datetimetz_dtype_match(): [ DatetimeTZDtype(unit="ns", tz="Asia/Tokyo"), np.dtype("datetime64[ns]"), - np.object, + object, np.int64, ], ) def test_datetimetz_dtype_mismatch(dtype2): dtype = DatetimeTZDtype(unit="ns", tz="US/Eastern") - assert find_common_type([dtype, dtype2]) == np.object - assert find_common_type([dtype2, dtype]) == np.object + assert find_common_type([dtype, dtype2]) == object + assert find_common_type([dtype2, dtype]) == object def test_period_dtype_match(): @@ -112,11 +112,11 @@ def test_period_dtype_match(): PeriodDtype(freq="2D"), PeriodDtype(freq="H"), np.dtype("datetime64[ns]"), - np.object, + object, np.int64, ], ) def test_period_dtype_mismatch(dtype2): dtype = PeriodDtype(freq="D") - assert find_common_type([dtype, dtype2]) == np.object - assert find_common_type([dtype2, dtype]) == np.object + assert find_common_type([dtype, dtype2]) == object + assert find_common_type([dtype2, dtype]) == object diff --git a/pandas/tests/dtypes/cast/test_infer_dtype.py b/pandas/tests/dtypes/cast/test_infer_dtype.py index 2744cfa8ddc62..70d38aad951cc 100644 --- a/pandas/tests/dtypes/cast/test_infer_dtype.py +++ b/pandas/tests/dtypes/cast/test_infer_dtype.py @@ -43,7 +43,9 @@ def test_infer_dtype_from_float_scalar(float_dtype): assert dtype == float_dtype -@pytest.mark.parametrize("data,exp_dtype", [(12, np.int64), (np.float(12), np.float64)]) +@pytest.mark.parametrize( + "data,exp_dtype", [(12, np.int64), (np.float_(12), np.float64)] +) def test_infer_dtype_from_python_scalar(data, exp_dtype): dtype, val = infer_dtype_from_scalar(data) assert dtype == exp_dtype @@ -184,8 +186,8 @@ def test_infer_dtype_from_array(arr, expected, pandas_dtype): (1, np.int64), (1.1, np.float64), (Timestamp("2011-01-01"), "datetime64[ns]"), - (Timestamp("2011-01-01", tz="US/Eastern"), np.object), - (Period("2011-01-01", freq="D"), 
np.object), + (Timestamp("2011-01-01", tz="US/Eastern"), object), + (Period("2011-01-01", freq="D"), object), ], ) def test_cast_scalar_to_array(obj, dtype): diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 1708139a397ab..ce12718e48d0d 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -112,7 +112,7 @@ def test_period_dtype(self, dtype): period=PeriodDtype("D"), integer=np.dtype(np.int64), float=np.dtype(np.float64), - object=np.dtype(np.object), + object=np.dtype(object), category=com.pandas_dtype("category"), ) @@ -547,7 +547,7 @@ def test_is_bool_dtype(): assert not com.is_bool_dtype(pd.Index(["a", "b"])) assert com.is_bool_dtype(bool) - assert com.is_bool_dtype(np.bool) + assert com.is_bool_dtype(np.bool_) assert com.is_bool_dtype(np.array([True, False])) assert com.is_bool_dtype(pd.Index([True, False])) @@ -615,7 +615,8 @@ def test_is_complex_dtype(): assert not com.is_complex_dtype(pd.Series([1, 2])) assert not com.is_complex_dtype(np.array(["a", "b"])) - assert com.is_complex_dtype(np.complex) + assert com.is_complex_dtype(np.complex_) + assert com.is_complex_dtype(complex) assert com.is_complex_dtype(np.array([1 + 1j, 5])) diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 3b9d3dc0b91f6..b1fe673e9e2f1 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -951,7 +951,7 @@ def test_registry_find(dtype, expected): (str, False), (int, False), (bool, True), - (np.bool, True), + (np.bool_, True), (np.array(["a", "b"]), False), (pd.Series([1, 2]), False), (np.array([True, False]), True), diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index e97716f7a5e9c..e40a12f7bc8d1 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1246,7 +1246,6 @@ def test_is_number(self): assert is_number(1) assert is_number(1.1) assert is_number(1 + 3j) - assert is_number(np.bool(False)) assert is_number(np.int64(1)) assert is_number(np.float64(1.1)) assert is_number(np.complex128(1 + 3j)) @@ -1267,7 +1266,7 @@ def test_is_number(self): def test_is_bool(self): assert is_bool(True) - assert is_bool(np.bool(False)) + assert is_bool(False) assert is_bool(np.bool_(False)) assert not is_bool(1) @@ -1294,7 +1293,7 @@ def test_is_integer(self): assert not is_integer(True) assert not is_integer(1.1) assert not is_integer(1 + 3j) - assert not is_integer(np.bool(False)) + assert not is_integer(False) assert not is_integer(np.bool_(False)) assert not is_integer(np.float64(1.1)) assert not is_integer(np.complex128(1 + 3j)) @@ -1317,7 +1316,7 @@ def test_is_float(self): assert not is_float(True) assert not is_float(1) assert not is_float(1 + 3j) - assert not is_float(np.bool(False)) + assert not is_float(False) assert not is_float(np.bool_(False)) assert not is_float(np.int64(1)) assert not is_float(np.complex128(1 + 3j)) diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py index eed9a584cc030..bfa53ad02525b 100644 --- a/pandas/tests/extension/base/setitem.py +++ b/pandas/tests/extension/base/setitem.py @@ -319,13 +319,13 @@ def test_setitem_dataframe_column_without_index(self, data): def test_setitem_series_with_index(self, data): # https://github.com/pandas-dev/pandas/issues/32395 ser = expected = pd.Series(data, name="data") - result = pd.Series(index=ser.index, dtype=np.object, name="data") + result = 
pd.Series(index=ser.index, dtype=object, name="data") result.loc[ser.index] = ser self.assert_series_equal(result, expected) def test_setitem_series_without_index(self, data): # https://github.com/pandas-dev/pandas/issues/32395 ser = expected = pd.Series(data, name="data") - result = pd.Series(index=ser.index, dtype=np.object, name="data") + result = pd.Series(index=ser.index, dtype=object, name="data") result.loc[:] = ser self.assert_series_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_duplicated.py b/pandas/tests/frame/methods/test_duplicated.py index 82fd6d88b82b9..7a1c16adc2a09 100644 --- a/pandas/tests/frame/methods/test_duplicated.py +++ b/pandas/tests/frame/methods/test_duplicated.py @@ -30,7 +30,7 @@ def test_duplicated_do_not_fail_on_wide_dataframes(): # calculation. Actual values doesn't matter here, though usually it's all # False in this case assert isinstance(result, Series) - assert result.dtype == np.bool + assert result.dtype == np.bool_ @pytest.mark.parametrize( diff --git a/pandas/tests/frame/methods/test_isin.py b/pandas/tests/frame/methods/test_isin.py index 6307738021f68..79ea70a38f145 100644 --- a/pandas/tests/frame/methods/test_isin.py +++ b/pandas/tests/frame/methods/test_isin.py @@ -164,7 +164,7 @@ def test_isin_multiIndex(self): tm.assert_frame_equal(result, expected) df2.index = idx - expected = df2.values.astype(np.bool) + expected = df2.values.astype(bool) expected[:, 1] = ~expected[:, 1] expected = DataFrame(expected, columns=["A", "B"], index=idx) diff --git a/pandas/tests/frame/methods/test_to_records.py b/pandas/tests/frame/methods/test_to_records.py index 34b323e55d8cd..d9c999c9119f4 100644 --- a/pandas/tests/frame/methods/test_to_records.py +++ b/pandas/tests/frame/methods/test_to_records.py @@ -163,7 +163,7 @@ def test_to_records_with_categorical(self): ), # Pass in a type instance. ( - dict(column_dtypes=np.unicode), + dict(column_dtypes=str), np.rec.array( [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], dtype=[("index", "", "<"]) def test_bool_uint(self, byteorder, version): - s0 = Series([0, 1, True], dtype=np.bool) + s0 = Series([0, 1, True], dtype=np.bool_) s1 = Series([0, 1, 100], dtype=np.uint8) s2 = Series([0, 1, 255], dtype=np.uint8) s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16) @@ -855,7 +855,7 @@ def test_big_dates(self): expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677, 10, 1) expected[5][5] = expected[5][6] = datetime(1678, 1, 1) - expected = DataFrame(expected, columns=columns, dtype=np.object) + expected = DataFrame(expected, columns=columns, dtype=object) parsed_115 = read_stata(self.dta18_115) parsed_117 = read_stata(self.dta18_117) tm.assert_frame_equal(expected, parsed_115, check_datetimelike_compat=True) diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 5a30e9fbb91c6..0d3425d001229 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -205,7 +205,7 @@ def test_hist_df_legacy(self): def test_hist_non_numerical_raises(self): # gh-10444 df = DataFrame(np.random.rand(10, 2)) - df_o = df.astype(np.object) + df_o = df.astype(object) msg = "hist method requires numerical columns, nothing to plot." 
with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 5341878d4986e..6da892c15f489 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -617,7 +617,7 @@ def test_kde_kwargs(self): sample_points = np.linspace(-100, 100, 20) _check_plot_works(self.ts.plot.kde, bw_method="scott", ind=20) _check_plot_works(self.ts.plot.kde, bw_method=None, ind=20) - _check_plot_works(self.ts.plot.kde, bw_method=None, ind=np.int(20)) + _check_plot_works(self.ts.plot.kde, bw_method=None, ind=np.int_(20)) _check_plot_works(self.ts.plot.kde, bw_method=0.5, ind=sample_points) _check_plot_works(self.ts.plot.density, bw_method=0.5, ind=sample_points) _, ax = self.plt.subplots() diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index 485535bec20d0..28d33ebb23c20 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -180,7 +180,7 @@ def test_resample_size_empty_dataframe(freq, empty_frame_dti): @pytest.mark.parametrize("index", tm.all_timeseries_index_generator(0)) -@pytest.mark.parametrize("dtype", [np.float, np.int, np.object, "datetime64[ns]"]) +@pytest.mark.parametrize("dtype", [float, int, object, "datetime64[ns]"]) def test_resample_empty_dtypes(index, dtype, resample_method): # Empty series were sometimes causing a segfault (for the functions # with Cython bounds-checking disabled) or an IndexError. We just run diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 19fd8db5322ed..1c9d00a4b4c90 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2759,8 +2759,8 @@ def test_concat_sparse(): def test_concat_dense_sparse(): # GH 30668 - a = pd.Series(pd.arrays.SparseArray([1, None]), dtype=np.float) - b = pd.Series([1], dtype=np.float) + a = pd.Series(pd.arrays.SparseArray([1, None]), dtype=float) + b = pd.Series([1], dtype=float) expected = pd.Series(data=[1, None, 1], index=[0, 1, 0]).astype( pd.SparseDtype(np.float64, None) ) diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py index 8daea84492871..3f85abb4b2817 100644 --- a/pandas/tests/series/indexing/test_where.py +++ b/pandas/tests/series/indexing/test_where.py @@ -278,7 +278,7 @@ def test_where_setitem_invalid(): "mask", [[True, False, False, False, False], [True, False], [False]] ) @pytest.mark.parametrize( - "item", [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min] + "item", [2.0, np.nan, np.finfo(float).max, np.finfo(float).min] ) # Test numpy arrays, lists and tuples as the input to be # broadcast diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index e6f86dda05893..d51dceae53a1c 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -180,7 +180,7 @@ def test_apply_categorical(self): result = ser.apply(lambda x: "A") exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg")) tm.assert_series_equal(result, exp) - assert result.dtype == np.object + assert result.dtype == object @pytest.mark.parametrize("series", [["1-1", "1-1", np.NaN], ["1-1", "1-2", np.NaN]]) def test_apply_categorical_with_nan_values(self, series): @@ -717,7 +717,7 @@ def test_map_categorical(self): result = s.map(lambda x: "A") exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg")) tm.assert_series_equal(result, exp) - assert result.dtype == np.object + assert result.dtype == 
object with pytest.raises(NotImplementedError): s.map(lambda x: x, na_action="ignore") diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py index 0766bfc37d7ca..95eba6ccc4df8 100644 --- a/pandas/tests/series/test_combine_concat.py +++ b/pandas/tests/series/test_combine_concat.py @@ -68,9 +68,9 @@ def get_result_type(dtype, dtype2): (np.bool_, np.int32, np.int32), (np.bool_, np.float32, np.object_), # datetime-like - ("m8[ns]", np.bool, np.object_), + ("m8[ns]", np.bool_, np.object_), ("m8[ns]", np.int64, np.object_), - ("M8[ns]", np.bool, np.object_), + ("M8[ns]", np.bool_, np.object_), ("M8[ns]", np.int64, np.object_), # categorical ("category", "category", "category"), diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index ff5f890cc41f8..44a8452964f5a 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -713,7 +713,7 @@ def test_first_nan_kept(self): NAN2 = struct.unpack("d", struct.pack("=Q", bits_for_nan2))[0] assert NAN1 != NAN1 assert NAN2 != NAN2 - for el_type in [np.float64, np.object]: + for el_type in [np.float64, object]: a = np.array([NAN1, NAN2], dtype=el_type) result = pd.unique(a) assert result.size == 1 @@ -725,7 +725,7 @@ def test_do_not_mangle_na_values(self, unique_nulls_fixture, unique_nulls_fixtur # GH 22295 if unique_nulls_fixture is unique_nulls_fixture2: return # skip it, values not unique - a = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=np.object) + a = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object) result = pd.unique(a) assert result.size == 2 assert a[0] is unique_nulls_fixture @@ -886,7 +886,7 @@ def test_different_nans(self): # as object-array: result = algos.isin( - np.asarray(comps, dtype=np.object), np.asarray(values, dtype=np.object) + np.asarray(comps, dtype=object), np.asarray(values, dtype=object) ) tm.assert_numpy_array_equal(np.array([True]), result) @@ -916,8 +916,8 @@ def test_empty(self, empty): def test_different_nan_objects(self): # GH 22119 - comps = np.array(["nan", np.nan * 1j, float("nan")], dtype=np.object) - vals = np.array([float("nan")], dtype=np.object) + comps = np.array(["nan", np.nan * 1j, float("nan")], dtype=object) + vals = np.array([float("nan")], dtype=object) expected = np.array([False, False, True]) result = algos.isin(comps, vals) tm.assert_numpy_array_equal(expected, result) @@ -1157,7 +1157,7 @@ def test_dropna(self): def test_value_counts_normalized(self): # GH12558 s = Series([1, 2, np.nan, np.nan, np.nan]) - dtypes = (np.float64, np.object, "M8[ns]") + dtypes = (np.float64, object, "M8[ns]") for t in dtypes: s_typed = s.astype(t) result = s_typed.value_counts(normalize=True, dropna=False) @@ -2290,10 +2290,10 @@ def test_mode_single(self): exp = Series(exp_multi, dtype=dt) tm.assert_series_equal(algos.mode(s), exp) - exp = Series([1], dtype=np.int) + exp = Series([1], dtype=int) tm.assert_series_equal(algos.mode([1]), exp) - exp = Series(["a", "b", "c"], dtype=np.object) + exp = Series(["a", "b", "c"], dtype=object) tm.assert_series_equal(algos.mode(["a", "b", "c"]), exp) def test_number_mode(self): diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py index 2a42eb5d73136..9f0632917037c 100644 --- a/pandas/tests/test_take.py +++ b/pandas/tests/test_take.py @@ -31,7 +31,7 @@ def writeable(request): (np.int16, False), (np.int8, False), (np.object_, True), - (np.bool, False), + (np.bool_, False), ] ) def dtype_can_hold_na(request): diff --git a/pandas/tests/tslibs/test_fields.py 
b/pandas/tests/tslibs/test_fields.py index 943f4207df543..a45fcab56759f 100644 --- a/pandas/tests/tslibs/test_fields.py +++ b/pandas/tests/tslibs/test_fields.py @@ -12,9 +12,7 @@ def test_fields_readonly(): dtindex.flags.writeable = False result = fields.get_date_name_field(dtindex, "month_name") - expected = np.array( - ["January", "February", "March", "April", "May"], dtype=np.object - ) + expected = np.array(["January", "February", "March", "April", "May"], dtype=object) tm.assert_numpy_array_equal(result, expected) result = fields.get_date_field(dtindex, "Y") diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py index 3e5475e6b274f..f6e2834965da3 100644 --- a/pandas/tests/window/moments/test_moments_rolling.py +++ b/pandas/tests/window/moments/test_moments_rolling.py @@ -515,7 +515,7 @@ def test_cmov_window_regular(win_types): @td.skip_if_no_scipy def test_cmov_window_regular_linear_range(win_types): # GH 8238 - vals = np.array(range(10), dtype=np.float) + vals = np.array(range(10), dtype=float) xp = vals.copy() xp[:2] = np.nan xp[-2:] = np.nan @@ -718,7 +718,7 @@ def test_cmov_window_special_linear_range(win_types_special): "exponential": {"tau": 10}, } - vals = np.array(range(10), dtype=np.float) + vals = np.array(range(10), dtype=float) xp = vals.copy() xp[:2] = np.nan xp[-2:] = np.nan
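
For reference, the substitution rule applied throughout this patch: the aliases being replaced (``np.object``, ``np.bool``, ``np.float``, ``np.int``, ``np.complex``) were plain re-exports of the Python builtins (later deprecated in NumPy 1.20 and removed in 1.24), so the builtin is a drop-in replacement wherever a dtype is constructed, while the trailing-underscore names (``np.object_``, ``np.bool_``, ``np.float_``, ``np.complex_``) are the NumPy scalar types and are used where a NumPy scalar type is specifically meant (e.g. ``isinstance`` checks and expected-dtype assertions in the tests). A minimal sketch of the distinction, assuming only NumPy and not part of the patch itself:

import numpy as np

# As dtype arguments, the builtin and the NumPy scalar type are interchangeable:
assert np.dtype(object) == np.dtype(np.object_)
assert np.dtype(bool) == np.dtype(np.bool_)
assert np.empty(3, dtype=object).dtype == np.dtype("object")

# As types, they are not: array elements are NumPy scalars, and np.bool_ is not
# a subclass of the builtin bool (unlike the removed alias np.bool, which *was* bool).
assert isinstance(np.array([True])[0], np.bool_)
assert not isinstance(np.array([True])[0], bool)
assert not issubclass(np.bool_, bool)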