Commit 5555c51

PERF: call np.errstate less (#52366)
* PERF: use np.errstate less
* PERF: less np.errstate
* less np.errstate
1 parent 66f3f59 commit 5555c51
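
The motivation is that np.errstate is not free: each entry and exit saves and restores NumPy's floating-point error state, so wrapping it around every column, block, or element adds up. A minimal, illustrative timing sketch (not from this commit; the workload and names are made up) comparing a per-column errstate with a single enclosing one:

    import numpy as np
    from timeit import timeit

    # hypothetical "per-column" workload
    cols = [np.random.rand(1_000) for _ in range(1_000)]

    def errstate_per_column():
        # old pattern: enter/exit np.errstate once per column operation
        out = []
        for col in cols:
            with np.errstate(all="ignore"):
                out.append(col / col.sum())
        return out

    def errstate_once():
        # new pattern: one errstate block around the whole loop
        out = []
        with np.errstate(all="ignore"):
            for col in cols:
                out.append(col / col.sum())
        return out

    print("per column:", timeit(errstate_per_column, number=20))
    print("once:      ", timeit(errstate_once, number=20))

In this sketch the two variants produce identical results; the per-column one only spends extra time managing error state, which is why the context manager can be hoisted outward without changing behavior.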

File tree: 13 files changed (+95, -111 lines)


pandas/core/arrays/datetimelike.py (+3, -4)

@@ -905,10 +905,9 @@ def _cmp_method(self, other, op):
             # We have to use comp_method_OBJECT_ARRAY instead of numpy
             # comparison otherwise it would fail to raise when
             # comparing tz-aware and tz-naive
-            with np.errstate(all="ignore"):
-                result = ops.comp_method_OBJECT_ARRAY(
-                    op, np.asarray(self.astype(object)), other
-                )
+            result = ops.comp_method_OBJECT_ARRAY(
+                op, np.asarray(self.astype(object)), other
+            )
             return result

         if other is NaT:

pandas/core/arrays/masked.py (+2, -3)

@@ -755,9 +755,8 @@ def _cmp_method(self, other, op) -> BooleanArray:
                 # behavior today, so that should be fine to ignore.
                 warnings.filterwarnings("ignore", "elementwise", FutureWarning)
                 warnings.filterwarnings("ignore", "elementwise", DeprecationWarning)
-                with np.errstate(all="ignore"):
-                    method = getattr(self._data, f"__{op.__name__}__")
-                    result = method(other)
+                method = getattr(self._data, f"__{op.__name__}__")
+                result = method(other)

                 if result is NotImplemented:
                     result = invalid_comparison(self._data, other, op)

pandas/core/arrays/numpy_.py (+1, -2)

@@ -447,8 +447,7 @@ def _cmp_method(self, other, op):
         other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
         pd_op = ops.get_array_op(op)
         other = ensure_wrapped_if_datetimelike(other)
-        with np.errstate(all="ignore"):
-            result = pd_op(self._ndarray, other)
+        result = pd_op(self._ndarray, other)

         if op is divmod or op is ops.rdivmod:
             a, b = result

pandas/core/arrays/sparse/array.py (+3, -4)

@@ -1756,10 +1756,9 @@ def _cmp_method(self, other, op) -> SparseArray:
             return _sparse_array_op(self, other, op, op_name)
         else:
             # scalar
-            with np.errstate(all="ignore"):
-                fill_value = op(self.fill_value, other)
-                result = np.full(len(self), fill_value, dtype=np.bool_)
-                result[self.sp_index.indices] = op(self.sp_values, other)
+            fill_value = op(self.fill_value, other)
+            result = np.full(len(self), fill_value, dtype=np.bool_)
+            result[self.sp_index.indices] = op(self.sp_values, other)

             return type(self)(
                 result,

pandas/core/computation/ops.py (+1, -2)

@@ -601,8 +601,7 @@ def __init__(self, func, args) -> None:
     def __call__(self, env):
         # error: "Op" not callable
         operands = [op(env) for op in self.operands]  # type: ignore[operator]
-        with np.errstate(all="ignore"):
-            return self.func.func(*operands)
+        return self.func.func(*operands)

     def __repr__(self) -> str:
         operands = map(str, self.operands)

pandas/core/frame.py (+34, -33)

@@ -7433,7 +7433,8 @@ def _arith_method(self, other, op):

         self, other = self._align_for_op(other, axis, flex=True, level=None)

-        new_data = self._dispatch_frame_op(other, op, axis=axis)
+        with np.errstate(all="ignore"):
+            new_data = self._dispatch_frame_op(other, op, axis=axis)
         return self._construct_result(new_data)

     _logical_method = _arith_method

@@ -7454,15 +7455,18 @@ def _dispatch_frame_op(
         Returns
         -------
         DataFrame
+
+        Notes
+        -----
+        Caller is responsible for setting np.errstate where relevant.
         """
         # Get the appropriate array-op to apply to each column/block's values.
         array_op = ops.get_array_op(func)

         right = lib.item_from_zerodim(right)
         if not is_list_like(right):
             # i.e. scalar, faster than checking np.ndim(right) == 0
-            with np.errstate(all="ignore"):
-                bm = self._mgr.apply(array_op, right=right)
+            bm = self._mgr.apply(array_op, right=right)
             return self._constructor(bm)

         elif isinstance(right, DataFrame):

@@ -7473,17 +7477,16 @@ def _dispatch_frame_op(
             # _frame_arith_method_with_reindex

             # TODO operate_blockwise expects a manager of the same type
-            with np.errstate(all="ignore"):
-                bm = self._mgr.operate_blockwise(
-                    # error: Argument 1 to "operate_blockwise" of "ArrayManager" has
-                    # incompatible type "Union[ArrayManager, BlockManager]"; expected
-                    # "ArrayManager"
-                    # error: Argument 1 to "operate_blockwise" of "BlockManager" has
-                    # incompatible type "Union[ArrayManager, BlockManager]"; expected
-                    # "BlockManager"
-                    right._mgr,  # type: ignore[arg-type]
-                    array_op,
-                )
+            bm = self._mgr.operate_blockwise(
+                # error: Argument 1 to "operate_blockwise" of "ArrayManager" has
+                # incompatible type "Union[ArrayManager, BlockManager]"; expected
+                # "ArrayManager"
+                # error: Argument 1 to "operate_blockwise" of "BlockManager" has
+                # incompatible type "Union[ArrayManager, BlockManager]"; expected
+                # "BlockManager"
+                right._mgr,  # type: ignore[arg-type]
+                array_op,
+            )
             return self._constructor(bm)

         elif isinstance(right, Series) and axis == 1:

@@ -7494,18 +7497,16 @@ def _dispatch_frame_op(
             # maybe_align_as_frame ensures we do not have an ndarray here
             assert not isinstance(right, np.ndarray)

-            with np.errstate(all="ignore"):
-                arrays = [
-                    array_op(_left, _right)
-                    for _left, _right in zip(self._iter_column_arrays(), right)
-                ]
+            arrays = [
+                array_op(_left, _right)
+                for _left, _right in zip(self._iter_column_arrays(), right)
+            ]

         elif isinstance(right, Series):
             assert right.index.equals(self.index)
             right = right._values

-            with np.errstate(all="ignore"):
-                arrays = [array_op(left, right) for left in self._iter_column_arrays()]
+            arrays = [array_op(left, right) for left in self._iter_column_arrays()]

         else:
             raise NotImplementedError(right)

@@ -7784,18 +7785,19 @@ def _flex_arith_method(
         )
         self, other = self._align_for_op(other, axis, flex=True, level=level)

-        if isinstance(other, DataFrame):
-            # Another DataFrame
-            new_data = self._combine_frame(other, op, fill_value)
+        with np.errstate(all="ignore"):
+            if isinstance(other, DataFrame):
+                # Another DataFrame
+                new_data = self._combine_frame(other, op, fill_value)

-        elif isinstance(other, Series):
-            new_data = self._dispatch_frame_op(other, op, axis=axis)
-        else:
-            # in this case we always have `np.ndim(other) == 0`
-            if fill_value is not None:
-                self = self.fillna(fill_value)
+            elif isinstance(other, Series):
+                new_data = self._dispatch_frame_op(other, op, axis=axis)
+            else:
+                # in this case we always have `np.ndim(other) == 0`
+                if fill_value is not None:
+                    self = self.fillna(fill_value)

-            new_data = self._dispatch_frame_op(other, op)
+                new_data = self._dispatch_frame_op(other, op)

         return self._construct_result(new_data)

@@ -8477,8 +8479,7 @@ def update(
             that = other[col]._values

             if filter_func is not None:
-                with np.errstate(all="ignore"):
-                    mask = ~filter_func(this) | isna(that)
+                mask = ~filter_func(this) | isna(that)
             else:
                 if errors == "raise":
                     mask_this = notna(that)
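
The frame.py changes follow a single pattern: _dispatch_frame_op drops its per-branch np.errstate blocks and instead documents that the caller is responsible, while the entry points (_arith_method, _flex_arith_method) wrap the whole dispatch once. A stripped-down sketch of that split, with hypothetical names standing in for the pandas internals:

    import numpy as np

    def dispatch_op(columns, op, right):
        """Apply ``op`` column by column.

        Notes
        -----
        Caller is responsible for setting np.errstate where relevant.
        """
        return [op(col, right) for col in columns]

    def arith_method(columns, op, right):
        # one errstate around the whole dispatch, instead of one per column
        with np.errstate(all="ignore"):
            return dispatch_op(columns, op, right)

    # usage: division by zero is silenced once, at the outer layer
    cols = [np.array([1.0, 2.0]), np.array([3.0, 0.0])]
    print(arith_method(cols, np.divide, 0.0))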

pandas/core/groupby/groupby.py (+1, -2)

@@ -1468,8 +1468,7 @@ def apply(self, func, *args, **kwargs) -> NDFrameT:

                @wraps(func)
                def f(g):
-                    with np.errstate(all="ignore"):
-                        return func(g, *args, **kwargs)
+                    return func(g, *args, **kwargs)

            else:
                raise ValueError(

pandas/core/indexes/base.py (+3, -6)

@@ -6788,20 +6788,17 @@ def _cmp_method(self, other, op):

         if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray):
             # e.g. PeriodArray, Categorical
-            with np.errstate(all="ignore"):
-                result = op(self._values, other)
+            result = op(self._values, other)

         elif isinstance(self._values, ExtensionArray):
             result = op(self._values, other)

         elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex):
             # don't pass MultiIndex
-            with np.errstate(all="ignore"):
-                result = ops.comp_method_OBJECT_ARRAY(op, self._values, other)
+            result = ops.comp_method_OBJECT_ARRAY(op, self._values, other)

         else:
-            with np.errstate(all="ignore"):
-                result = ops.comparison_op(self._values, other, op)
+            result = ops.comparison_op(self._values, other, op)

         return result

pandas/core/nanops.py (+7, -8)

@@ -90,8 +90,7 @@ def _f(*args, **kwargs):
                     f"reduction operation '{f_name}' not allowed for this dtype"
                 )
             try:
-                with np.errstate(invalid="ignore"):
-                    return f(*args, **kwargs)
+                return f(*args, **kwargs)
             except ValueError as e:
                 # we want to transform an object array
                 # ValueError message to the more typical TypeError

@@ -1239,7 +1238,8 @@ def nanskew(
     elif not skipna and mask is not None and mask.any():
         return np.nan

-    mean = values.sum(axis, dtype=np.float64) / count
+    with np.errstate(invalid="ignore", divide="ignore"):
+        mean = values.sum(axis, dtype=np.float64) / count
     if axis is not None:
         mean = np.expand_dims(mean, axis)

@@ -1326,7 +1326,8 @@ def nankurt(
     elif not skipna and mask is not None and mask.any():
         return np.nan

-    mean = values.sum(axis, dtype=np.float64) / count
+    with np.errstate(invalid="ignore", divide="ignore"):
+        mean = values.sum(axis, dtype=np.float64) / count
     if axis is not None:
         mean = np.expand_dims(mean, axis)

@@ -1567,8 +1568,7 @@ def check_below_min_count(
 def _zero_out_fperr(arg):
     # #18044 reference this behavior to fix rolling skew/kurt issue
     if isinstance(arg, np.ndarray):
-        with np.errstate(invalid="ignore"):
-            return np.where(np.abs(arg) < 1e-14, 0, arg)
+        return np.where(np.abs(arg) < 1e-14, 0, arg)
     else:
         return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg

@@ -1703,8 +1703,7 @@ def f(x, y):
         ymask = isna(y)
         mask = xmask | ymask

-        with np.errstate(all="ignore"):
-            result = op(x, y)
+        result = op(x, y)

         if mask.any():
             if is_bool_dtype(result):
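
nanops.py moves in both directions: the blanket errstate around every wrapped reduction is removed, and a tightly scoped one is added in nanskew/nankurt around the one expression that can legitimately warn, the values.sum(axis) / count division (count may be 0). A small sketch of that narrowed scope, using a hypothetical helper rather than the pandas functions:

    import numpy as np

    def masked_mean(values: np.ndarray, count: int) -> float:
        # only the division can raise "invalid"/"divide" FP warnings (e.g. 0/0 -> nan),
        # so the errstate block is kept as small as possible
        with np.errstate(invalid="ignore", divide="ignore"):
            return values.sum(dtype=np.float64) / count

    print(masked_mean(np.array([1.0, 2.0, 3.0]), 3))   # 2.0
    print(masked_mean(np.array([], dtype=float), 0))   # nan, no RuntimeWarning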

pandas/core/ops/missing.py (+2, -3)

@@ -119,9 +119,8 @@ def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray:
         x_lt0 = x < 0
         x_gt0 = x > 0
         nan_mask = zmask & (x == 0)
-        with np.errstate(invalid="ignore"):
-            neginf_mask = (zpos_mask & x_lt0) | (zneg_mask & x_gt0)
-            posinf_mask = (zpos_mask & x_gt0) | (zneg_mask & x_lt0)
+        neginf_mask = (zpos_mask & x_lt0) | (zneg_mask & x_gt0)
+        posinf_mask = (zpos_mask & x_gt0) | (zneg_mask & x_lt0)

         if nan_mask.any() or neginf_mask.any() or posinf_mask.any():
             # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN

pandas/core/reshape/merge.py (+20, -20)

@@ -1320,16 +1320,16 @@ def _maybe_coerce_merge_keys(self) -> None:
                     # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]"
                     casted = lk.astype(rk.dtype)  # type: ignore[arg-type]

-                    mask = ~np.isnan(lk)
-                    match = lk == casted
-                    if not match[mask].all():
-                        warnings.warn(
-                            "You are merging on int and float "
-                            "columns where the float values "
-                            "are not equal to their int representation.",
-                            UserWarning,
-                            stacklevel=find_stack_level(),
-                        )
+                mask = ~np.isnan(lk)
+                match = lk == casted
+                if not match[mask].all():
+                    warnings.warn(
+                        "You are merging on int and float "
+                        "columns where the float values "
+                        "are not equal to their int representation.",
+                        UserWarning,
+                        stacklevel=find_stack_level(),
+                    )
                 continue

             if is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype):

@@ -1340,16 +1340,16 @@ def _maybe_coerce_merge_keys(self) -> None:
                     # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]"
                     casted = rk.astype(lk.dtype)  # type: ignore[arg-type]

-                    mask = ~np.isnan(rk)
-                    match = rk == casted
-                    if not match[mask].all():
-                        warnings.warn(
-                            "You are merging on int and float "
-                            "columns where the float values "
-                            "are not equal to their int representation.",
-                            UserWarning,
-                            stacklevel=find_stack_level(),
-                        )
+                mask = ~np.isnan(rk)
+                match = rk == casted
+                if not match[mask].all():
+                    warnings.warn(
+                        "You are merging on int and float "
+                        "columns where the float values "
+                        "are not equal to their int representation.",
+                        UserWarning,
+                        stacklevel=find_stack_level(),
+                    )
                 continue

             # let's infer and see if we are ok
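
The merge.py hunks only change indentation: the int/float consistency check is dedented out of the enclosing errstate block (the with statement itself sits just above the visible context), so suppression presumably now covers only the astype cast, where a NaN-to-int conversion can warn, while the masked comparison and the UserWarning run outside it. A rough standalone sketch under that assumption, with a hypothetical helper name:

    import warnings

    import numpy as np

    def check_int_float_merge_keys(lk: np.ndarray, rk_dtype: np.dtype) -> None:
        # the cast is where NaN -> int can emit "invalid value" warnings,
        # so only it stays under errstate; the comparison below does not need it
        with np.errstate(invalid="ignore"):
            casted = lk.astype(rk_dtype)

        mask = ~np.isnan(lk)
        match = lk == casted
        if not match[mask].all():
            warnings.warn(
                "You are merging on int and float columns where the float "
                "values are not equal to their int representation.",
                UserWarning,
            )

    check_int_float_merge_keys(np.array([1.0, 2.5, np.nan]), np.dtype("int64"))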

pandas/core/series.py (+6, -8)

@@ -3181,10 +3181,10 @@ def combine(
             new_index = self.index.union(other.index)
             new_name = ops.get_op_result_name(self, other)
             new_values = np.empty(len(new_index), dtype=object)
-            for i, idx in enumerate(new_index):
-                lv = self.get(idx, fill_value)
-                rv = other.get(idx, fill_value)
-                with np.errstate(all="ignore"):
+            with np.errstate(all="ignore"):
+                for i, idx in enumerate(new_index):
+                    lv = self.get(idx, fill_value)
+                    rv = other.get(idx, fill_value)
                     new_values[i] = func(lv, rv)
         else:
             # Assume that other is a scalar, so apply the function for

@@ -4564,8 +4564,7 @@ def _reduce(
                     f"Series.{name} does not allow {kwd_name}={numeric_only} "
                     "with non-numeric dtypes."
                 )
-            with np.errstate(all="ignore"):
-                return op(delegate, skipna=skipna, **kwds)
+            return op(delegate, skipna=skipna, **kwds)

     def _reindex_indexer(
         self,

@@ -5772,8 +5771,7 @@ def _cmp_method(self, other, op):
         lvalues = self._values
         rvalues = extract_array(other, extract_numpy=True, extract_range=True)

-        with np.errstate(all="ignore"):
-            res_values = ops.comparison_op(lvalues, rvalues, op)
+        res_values = ops.comparison_op(lvalues, rvalues, op)

         return self._construct_result(res_values, name=res_name)