diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index c036dc09948d8..74c24f263c3da 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -905,10 +905,9 @@ def _cmp_method(self, other, op):
             # We have to use comp_method_OBJECT_ARRAY instead of numpy
             # comparison otherwise it would fail to raise when
             # comparing tz-aware and tz-naive
-            with np.errstate(all="ignore"):
-                result = ops.comp_method_OBJECT_ARRAY(
-                    op, np.asarray(self.astype(object)), other
-                )
+            result = ops.comp_method_OBJECT_ARRAY(
+                op, np.asarray(self.astype(object)), other
+            )
             return result
 
         if other is NaT:
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index aa3516c3ecb4f..0ea3732b36283 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -755,9 +755,8 @@ def _cmp_method(self, other, op) -> BooleanArray:
                 # behavior today, so that should be fine to ignore.
                 warnings.filterwarnings("ignore", "elementwise", FutureWarning)
                 warnings.filterwarnings("ignore", "elementwise", DeprecationWarning)
-                with np.errstate(all="ignore"):
-                    method = getattr(self._data, f"__{op.__name__}__")
-                    result = method(other)
+                method = getattr(self._data, f"__{op.__name__}__")
+                result = method(other)
 
                 if result is NotImplemented:
                     result = invalid_comparison(self._data, other, op)
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index b8ba461d2806a..c0a7da8eece5f 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -447,8 +447,7 @@ def _cmp_method(self, other, op):
         other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
         pd_op = ops.get_array_op(op)
         other = ensure_wrapped_if_datetimelike(other)
-        with np.errstate(all="ignore"):
-            result = pd_op(self._ndarray, other)
+        result = pd_op(self._ndarray, other)
 
         if op is divmod or op is ops.rdivmod:
             a, b = result
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index f55fde9c75e4b..6310de4678f7f 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -1756,10 +1756,9 @@ def _cmp_method(self, other, op) -> SparseArray:
             return _sparse_array_op(self, other, op, op_name)
         else:
             # scalar
-            with np.errstate(all="ignore"):
-                fill_value = op(self.fill_value, other)
-                result = np.full(len(self), fill_value, dtype=np.bool_)
-                result[self.sp_index.indices] = op(self.sp_values, other)
+            fill_value = op(self.fill_value, other)
+            result = np.full(len(self), fill_value, dtype=np.bool_)
+            result[self.sp_index.indices] = op(self.sp_values, other)
 
             return type(self)(
                 result,
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index 0538cc7b8d4ed..dd7f9c3f76049 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -601,8 +601,7 @@ def __init__(self, func, args) -> None:
     def __call__(self, env):
         # error: "Op" not callable
         operands = [op(env) for op in self.operands]  # type: ignore[operator]
-        with np.errstate(all="ignore"):
-            return self.func.func(*operands)
+        return self.func.func(*operands)
 
     def __repr__(self) -> str:
         operands = map(str, self.operands)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index c2017c0acc55e..74873abac0758 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7433,7 +7433,8 @@ def _arith_method(self, other, op):
 
         self, other = self._align_for_op(other, axis, flex=True, level=None)
 
-        new_data = self._dispatch_frame_op(other, op, axis=axis)
+        with np.errstate(all="ignore"):
+            new_data = self._dispatch_frame_op(other, op, axis=axis)
         return self._construct_result(new_data)
 
     _logical_method = _arith_method
@@ -7454,6 +7455,10 @@ def _dispatch_frame_op(
         Returns
         -------
         DataFrame
+
+        Notes
+        -----
+        Caller is responsible for setting np.errstate where relevant.
         """
         # Get the appropriate array-op to apply to each column/block's values.
         array_op = ops.get_array_op(func)
@@ -7461,8 +7466,7 @@ def _dispatch_frame_op(
         right = lib.item_from_zerodim(right)
         if not is_list_like(right):
             # i.e. scalar, faster than checking np.ndim(right) == 0
-            with np.errstate(all="ignore"):
-                bm = self._mgr.apply(array_op, right=right)
+            bm = self._mgr.apply(array_op, right=right)
             return self._constructor(bm)
 
         elif isinstance(right, DataFrame):
@@ -7473,17 +7477,16 @@ def _dispatch_frame_op(
             #  _frame_arith_method_with_reindex
 
             # TODO operate_blockwise expects a manager of the same type
-            with np.errstate(all="ignore"):
-                bm = self._mgr.operate_blockwise(
-                    # error: Argument 1 to "operate_blockwise" of "ArrayManager" has
-                    # incompatible type "Union[ArrayManager, BlockManager]"; expected
-                    # "ArrayManager"
-                    # error: Argument 1 to "operate_blockwise" of "BlockManager" has
-                    # incompatible type "Union[ArrayManager, BlockManager]"; expected
-                    # "BlockManager"
-                    right._mgr,  # type: ignore[arg-type]
-                    array_op,
-                )
+            bm = self._mgr.operate_blockwise(
+                # error: Argument 1 to "operate_blockwise" of "ArrayManager" has
+                # incompatible type "Union[ArrayManager, BlockManager]"; expected
+                # "ArrayManager"
+                # error: Argument 1 to "operate_blockwise" of "BlockManager" has
+                # incompatible type "Union[ArrayManager, BlockManager]"; expected
+                # "BlockManager"
+                right._mgr,  # type: ignore[arg-type]
+                array_op,
+            )
             return self._constructor(bm)
 
         elif isinstance(right, Series) and axis == 1:
@@ -7494,18 +7497,16 @@ def _dispatch_frame_op(
 
             # maybe_align_as_frame ensures we do not have an ndarray here
             assert not isinstance(right, np.ndarray)
-            with np.errstate(all="ignore"):
-                arrays = [
-                    array_op(_left, _right)
-                    for _left, _right in zip(self._iter_column_arrays(), right)
-                ]
+            arrays = [
+                array_op(_left, _right)
+                for _left, _right in zip(self._iter_column_arrays(), right)
+            ]
 
         elif isinstance(right, Series):
             assert right.index.equals(self.index)
             right = right._values
 
-            with np.errstate(all="ignore"):
-                arrays = [array_op(left, right) for left in self._iter_column_arrays()]
+            arrays = [array_op(left, right) for left in self._iter_column_arrays()]
 
         else:
             raise NotImplementedError(right)
@@ -7784,18 +7785,19 @@ def _flex_arith_method(
         )
         self, other = self._align_for_op(other, axis, flex=True, level=level)
 
-        if isinstance(other, DataFrame):
-            # Another DataFrame
-            new_data = self._combine_frame(other, op, fill_value)
+        with np.errstate(all="ignore"):
+            if isinstance(other, DataFrame):
+                # Another DataFrame
+                new_data = self._combine_frame(other, op, fill_value)
 
-        elif isinstance(other, Series):
-            new_data = self._dispatch_frame_op(other, op, axis=axis)
-        else:
-            # in this case we always have `np.ndim(other) == 0`
-            if fill_value is not None:
-                self = self.fillna(fill_value)
+            elif isinstance(other, Series):
+                new_data = self._dispatch_frame_op(other, op, axis=axis)
+            else:
+                # in this case we always have `np.ndim(other) == 0`
+                if fill_value is not None:
+                    self = self.fillna(fill_value)
 
-            new_data = self._dispatch_frame_op(other, op)
+                new_data = self._dispatch_frame_op(other, op)
 
         return self._construct_result(new_data)
 
@@ -8477,8 +8479,7 @@ def update(
             that = other[col]._values
 
            if filter_func is not None:
-                with np.errstate(all="ignore"):
-                    mask = ~filter_func(this) | isna(that)
+                mask = ~filter_func(this) | isna(that)
             else:
                 if errors == "raise":
                     mask_this = notna(that)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 29150c658b9e7..d02c92a442ce3 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1468,8 +1468,7 @@ def apply(self, func, *args, **kwargs) -> NDFrameT:
 
             @wraps(func)
             def f(g):
-                with np.errstate(all="ignore"):
-                    return func(g, *args, **kwargs)
+                return func(g, *args, **kwargs)
 
         else:
             raise ValueError(
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f9ce0fa1e6ee4..572bd2bcdf814 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -6788,20 +6788,17 @@ def _cmp_method(self, other, op):
 
         if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray):
             # e.g. PeriodArray, Categorical
-            with np.errstate(all="ignore"):
-                result = op(self._values, other)
+            result = op(self._values, other)
 
         elif isinstance(self._values, ExtensionArray):
             result = op(self._values, other)
 
         elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex):
             # don't pass MultiIndex
-            with np.errstate(all="ignore"):
-                result = ops.comp_method_OBJECT_ARRAY(op, self._values, other)
+            result = ops.comp_method_OBJECT_ARRAY(op, self._values, other)
 
         else:
-            with np.errstate(all="ignore"):
-                result = ops.comparison_op(self._values, other, op)
+            result = ops.comparison_op(self._values, other, op)
 
         return result
 
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 137fe67630968..c34211ff3d525 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -90,8 +90,7 @@ def _f(*args, **kwargs):
                     f"reduction operation '{f_name}' not allowed for this dtype"
                 )
             try:
-                with np.errstate(invalid="ignore"):
-                    return f(*args, **kwargs)
+                return f(*args, **kwargs)
             except ValueError as e:
                 # we want to transform an object array
                 # ValueError message to the more typical TypeError
@@ -1239,7 +1238,8 @@ def nanskew(
     elif not skipna and mask is not None and mask.any():
         return np.nan
 
-    mean = values.sum(axis, dtype=np.float64) / count
+    with np.errstate(invalid="ignore", divide="ignore"):
+        mean = values.sum(axis, dtype=np.float64) / count
     if axis is not None:
         mean = np.expand_dims(mean, axis)
 
@@ -1326,7 +1326,8 @@ def nankurt(
     elif not skipna and mask is not None and mask.any():
         return np.nan
 
-    mean = values.sum(axis, dtype=np.float64) / count
+    with np.errstate(invalid="ignore", divide="ignore"):
+        mean = values.sum(axis, dtype=np.float64) / count
     if axis is not None:
         mean = np.expand_dims(mean, axis)
 
@@ -1567,8 +1568,7 @@ def check_below_min_count(
 
 def _zero_out_fperr(arg):
     # #18044 reference this behavior to fix rolling skew/kurt issue
     if isinstance(arg, np.ndarray):
-        with np.errstate(invalid="ignore"):
-            return np.where(np.abs(arg) < 1e-14, 0, arg)
+        return np.where(np.abs(arg) < 1e-14, 0, arg)
     else:
         return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
 
@@ -1703,8 +1703,7 @@ def f(x, y):
         ymask = isna(y)
         mask = xmask | ymask
 
-        with np.errstate(all="ignore"):
-            result = op(x, y)
+        result = op(x, y)
 
         if mask.any():
             if is_bool_dtype(result):
diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py
index 0866c44d62ca0..5a2ec11ac2c15 100644
--- a/pandas/core/ops/missing.py
+++ b/pandas/core/ops/missing.py
@@ -119,9 +119,8 @@ def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray:
         x_lt0 = x < 0
         x_gt0 = x > 0
         nan_mask = zmask & (x == 0)
-        with np.errstate(invalid="ignore"):
-            neginf_mask = (zpos_mask & x_lt0) | (zneg_mask & x_gt0)
-            posinf_mask = (zpos_mask & x_gt0) | (zneg_mask & x_lt0)
+        neginf_mask = (zpos_mask & x_lt0) | (zneg_mask & x_gt0)
+        posinf_mask = (zpos_mask & x_gt0) | (zneg_mask & x_lt0)
 
         if nan_mask.any() or neginf_mask.any() or posinf_mask.any():
             # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 38aa0d97f9c8a..c11315527b3b5 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1320,16 +1320,16 @@ def _maybe_coerce_merge_keys(self) -> None:
                     # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]"
                     casted = lk.astype(rk.dtype)  # type: ignore[arg-type]
 
-                    mask = ~np.isnan(lk)
-                    match = lk == casted
-                    if not match[mask].all():
-                        warnings.warn(
-                            "You are merging on int and float "
-                            "columns where the float values "
-                            "are not equal to their int representation.",
-                            UserWarning,
-                            stacklevel=find_stack_level(),
-                        )
+                mask = ~np.isnan(lk)
+                match = lk == casted
+                if not match[mask].all():
+                    warnings.warn(
+                        "You are merging on int and float "
+                        "columns where the float values "
+                        "are not equal to their int representation.",
+                        UserWarning,
+                        stacklevel=find_stack_level(),
+                    )
                 continue
 
             if is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype):
@@ -1340,16 +1340,16 @@ def _maybe_coerce_merge_keys(self) -> None:
                     # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]"
                     casted = rk.astype(lk.dtype)  # type: ignore[arg-type]
 
-                    mask = ~np.isnan(rk)
-                    match = rk == casted
-                    if not match[mask].all():
-                        warnings.warn(
-                            "You are merging on int and float "
-                            "columns where the float values "
-                            "are not equal to their int representation.",
-                            UserWarning,
-                            stacklevel=find_stack_level(),
-                        )
+                mask = ~np.isnan(rk)
+                match = rk == casted
+                if not match[mask].all():
+                    warnings.warn(
+                        "You are merging on int and float "
+                        "columns where the float values "
+                        "are not equal to their int representation.",
+                        UserWarning,
+                        stacklevel=find_stack_level(),
+                    )
                 continue
 
             # let's infer and see if we are ok
diff --git a/pandas/core/series.py b/pandas/core/series.py
index e4a976fdb0c13..22c8d8b047280 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3181,10 +3181,10 @@ def combine(
             new_index = self.index.union(other.index)
             new_name = ops.get_op_result_name(self, other)
             new_values = np.empty(len(new_index), dtype=object)
-            for i, idx in enumerate(new_index):
-                lv = self.get(idx, fill_value)
-                rv = other.get(idx, fill_value)
-                with np.errstate(all="ignore"):
+            with np.errstate(all="ignore"):
+                for i, idx in enumerate(new_index):
+                    lv = self.get(idx, fill_value)
+                    rv = other.get(idx, fill_value)
                     new_values[i] = func(lv, rv)
         else:
             # Assume that other is a scalar, so apply the function for
@@ -4564,8 +4564,7 @@ def _reduce(
                     f"Series.{name} does not allow {kwd_name}={numeric_only} "
                     "with non-numeric dtypes."
                 )
-            with np.errstate(all="ignore"):
-                return op(delegate, skipna=skipna, **kwds)
+            return op(delegate, skipna=skipna, **kwds)
 
     def _reindex_indexer(
         self,
@@ -5772,8 +5771,7 @@ def _cmp_method(self, other, op):
         lvalues = self._values
         rvalues = extract_array(other, extract_numpy=True, extract_range=True)
 
-        with np.errstate(all="ignore"):
-            res_values = ops.comparison_op(lvalues, rvalues, op)
+        res_values = ops.comparison_op(lvalues, rvalues, op)
 
         return self._construct_result(res_values, name=res_name)
 
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 0bd8769b5de60..4e2da746e0803 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1591,15 +1591,12 @@ def format_values_with(float_format):
         else:
             too_long = False
 
-        with np.errstate(invalid="ignore"):
-            abs_vals = np.abs(self.values)
-            # this is pretty arbitrary for now
-            # large values: more that 8 characters including decimal symbol
-            # and first digit, hence > 1e6
-            has_large_values = (abs_vals > 1e6).any()
-            has_small_values = (
-                (abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)
-            ).any()
+        abs_vals = np.abs(self.values)
+        # this is pretty arbitrary for now
+        # large values: more that 8 characters including decimal symbol
+        # and first digit, hence > 1e6
+        has_large_values = (abs_vals > 1e6).any()
+        has_small_values = ((abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)).any()
 
         if has_small_values or (too_long and has_large_values):
             if self.leading_space is True:
@@ -1722,13 +1719,12 @@ def format_percentiles(
     percentiles = np.asarray(percentiles)
 
     # It checks for np.NaN as well
-    with np.errstate(invalid="ignore"):
-        if (
-            not is_numeric_dtype(percentiles)
-            or not np.all(percentiles >= 0)
-            or not np.all(percentiles <= 1)
-        ):
-            raise ValueError("percentiles should all be in the interval [0,1]")
+    if (
+        not is_numeric_dtype(percentiles)
+        or not np.all(percentiles >= 0)
+        or not np.all(percentiles <= 1)
+    ):
+        raise ValueError("percentiles should all be in the interval [0,1]")
 
     percentiles = 100 * percentiles
     percentiles_round_type = percentiles.round().astype(int)
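
Editorial note (not part of the patch): the hunks above remove np.errstate(all="ignore") from the low-level array ops and comparison methods and instead set it once at higher-level entry points such as DataFrame._arith_method, DataFrame._flex_arith_method and Series.combine, so a single errstate block covers the whole dispatch. A minimal sketch of that calling convention, using hypothetical stand-in functions rather than the real pandas internals:

    import numpy as np

    def array_op(left: np.ndarray, right: float) -> np.ndarray:
        # Low-level op: after this change it no longer silences
        # floating-point warnings itself.
        return left / right

    def arith_method(columns: list[np.ndarray], right: float) -> list[np.ndarray]:
        # Caller-level entry point: one errstate block covers every
        # column operation it dispatches.
        with np.errstate(all="ignore"):
            return [array_op(col, right) for col in columns]

    # 1.0/0.0 and 0.0/0.0 proceed without RuntimeWarning; downstream code
    # (e.g. mask_zero_div_zero in pandas/core/ops/missing.py) handles inf/nan.
    result = arith_method([np.array([1.0, 0.0, -1.0])], 0.0)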