Backport PR #61265: TYP: Add ignores for numpy 2.2 updates #61266

Closed
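
This backport applies one mechanical pattern across the files below: wherever mypy, run against the numpy 2.2 type stubs, reports a new error, the offending line gets an error-code-specific "# type: ignore[<code>]" comment (and, in a handful of files, an existing ignore that the new stubs no longer need is dropped along with its "# error: ..." explanation). A minimal, self-contained sketch of the most common case, modelled loosely on the pandas/core/missing.py hunk further down (the function and variable names here are illustrative, not part of the diff):

    import numpy as np
    import numpy.typing as npt

    def find_first_valid(is_valid: npt.NDArray[np.bool_]) -> int:
        if is_valid.ndim == 2:
            # The 2.2 stubs type the axis= overload of ndarray.any() more
            # broadly than NDArray[np.bool_], so re-binding the parameter
            # trips mypy's [assignment] check even though it runs fine.
            is_valid = is_valid.any(axis=1)  # type: ignore[assignment]
        return int(is_valid.argmax())

Scoping the ignore to a single error code keeps mypy reporting any other problem that later appears on the same line, which is why the blanket form "# type: ignore" is avoided throughout.
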
2 changes: 1 addition & 1 deletion pandas/core/algorithms.py
@@ -209,7 +209,7 @@ def _reconstruct_data(
values = cls._from_sequence(values, dtype=dtype)

else:
- values = values.astype(dtype, copy=False)
+ values = values.astype(dtype, copy=False) # type: ignore[assignment]

return values

4 changes: 2 additions & 2 deletions pandas/core/array_algos/quantile.py
@@ -102,7 +102,7 @@ def quantile_with_mask(
interpolation=interpolation,
)

- result = np.asarray(result)
+ result = np.asarray(result) # type: ignore[assignment]
result = result.T

return result
@@ -196,7 +196,7 @@ def _nanpercentile(
# Caller is responsible for ensuring mask shape match
assert mask.shape == values.shape
result = [
- _nanpercentile_1d(val, m, qs, na_value, interpolation=interpolation)
+ _nanpercentile_1d(val, m, qs, na_value, interpolation=interpolation) # type: ignore[arg-type]
for (val, m) in zip(list(values), list(mask))
]
if values.dtype.kind == "f":
8 changes: 1 addition & 7 deletions pandas/core/arrays/_mixins.py
@@ -141,18 +141,12 @@ def view(self, dtype: Dtype | None = None) -> ArrayLike:

dt64_values = arr.view(dtype)
return DatetimeArray._simple_new(dt64_values, dtype=dtype)

elif lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype):
from pandas.core.arrays import TimedeltaArray

td64_values = arr.view(dtype)
return TimedeltaArray._simple_new(td64_values, dtype=dtype)

- # error: Argument "dtype" to "view" of "_ArrayOrScalarCommon" has incompatible
- # type "Union[ExtensionDtype, dtype[Any]]"; expected "Union[dtype[Any], None,
- # type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int,
- # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
- return arr.view(dtype=dtype) # type: ignore[arg-type]
+ return arr.view(dtype=dtype)

def take(
self,
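
Not every hunk adds a suppression. Here, and in pandas/core/arrays/datetimes.py, pandas/core/indexes/interval.py, pandas/core/reshape/merge.py and pandas/core/missing.py below, the change goes the other way: a "# type: ignore" (plus the long "# error: ..." comment documenting it) is deleted because the call type-checks cleanly under the numpy 2.2 stubs. Stale suppressions like these can be surfaced automatically; a small illustrative sketch (whether this project's CI enables the flag is not shown in this diff):

    # demo.py -- an ignore that no longer suppresses anything
    import numpy as np

    arr = np.arange(3, dtype="i8")
    # If the current stubs accept this call, the trailing ignore is dead
    # weight, and "mypy --warn-unused-ignores demo.py" reports it as an
    # unused "type: ignore" comment, prompting the kind of cleanup seen above.
    viewed = arr.view(np.dtype("M8[ns]"))  # type: ignore[arg-type]
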
2 changes: 1 addition & 1 deletion pandas/core/arrays/arrow/_arrow_utils.py
@@ -44,7 +44,7 @@ def pyarrow_array_to_numpy_and_mask(
mask = pyarrow.BooleanArray.from_buffers(
pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset
)
- mask = np.asarray(mask)
+ mask = np.asarray(mask) # type: ignore[assignment]
else:
mask = np.ones(len(arr), dtype=bool)
return data, mask
2 changes: 1 addition & 1 deletion pandas/core/arrays/arrow/array.py
@@ -2499,7 +2499,7 @@ def _str_get_dummies(self, sep: str = "|"):
indices = indices + np.arange(n_rows).repeat(lengths) * n_cols
dummies = np.zeros(n_rows * n_cols, dtype=np.bool_)
dummies[indices] = True
- dummies = dummies.reshape((n_rows, n_cols))
+ dummies = dummies.reshape((n_rows, n_cols)) # type: ignore[assignment]
result = type(self)(pa.array(list(dummies)))
return result, uniques_sorted.to_pylist()

2 changes: 1 addition & 1 deletion pandas/core/arrays/base.py
@@ -569,7 +569,7 @@ def to_numpy(
if copy or na_value is not lib.no_default:
result = result.copy()
if na_value is not lib.no_default:
- result[self.isna()] = na_value
+ result[self.isna()] = na_value # type: ignore[index]
return result

# ------------------------------------------------------------------------
2 changes: 1 addition & 1 deletion pandas/core/arrays/categorical.py
@@ -1839,7 +1839,7 @@ def value_counts(self, dropna: bool = True) -> Series:
count = np.bincount(obs, minlength=ncat or 0)
else:
count = np.bincount(np.where(mask, code, ncat))
- ix = np.append(ix, -1)
+ ix = np.append(ix, -1) # type: ignore[assignment]

ix = coerce_indexer_dtype(ix, self.dtype.categories)
ix = self._from_backing_data(ix)
9 changes: 3 additions & 6 deletions pandas/core/arrays/datetimes.py
@@ -318,7 +318,7 @@ def _simple_new( # type: ignore[override]
else:
# DatetimeTZDtype. If we have e.g. DatetimeTZDtype[us, UTC],
# then values.dtype should be M8[us].
- assert dtype._creso == get_unit_from_dtype(values.dtype)
+ assert dtype._creso == get_unit_from_dtype(values.dtype) # type: ignore[union-attr]

result = super()._simple_new(values, dtype)
result._freq = freq
@@ -529,7 +529,7 @@ def _unbox_scalar(self, value) -> np.datetime64:
raise ValueError("'value' should be a Timestamp.")
self._check_compatible_with(value)
if value is NaT:
- return np.datetime64(value._value, self.unit)
+ return np.datetime64(value._value, self.unit) # type: ignore[call-overload]
else:
return value.as_unit(self.unit).asm8

@@ -803,10 +803,7 @@ def _add_offset(self, offset: BaseOffset) -> Self:
try:
res_values = offset._apply_array(values._ndarray)
if res_values.dtype.kind == "i":
- # error: Argument 1 to "view" of "ndarray" has incompatible type
- # "dtype[datetime64] | DatetimeTZDtype"; expected
- # "dtype[Any] | type[Any] | _SupportsDType[dtype[Any]]"
- res_values = res_values.view(values.dtype) # type: ignore[arg-type]
+ res_values = res_values.view(values.dtype)
except NotImplementedError:
warnings.warn(
"Non-vectorized DateOffset being applied to Series or DatetimeIndex.",
6 changes: 3 additions & 3 deletions pandas/core/arrays/masked.py
@@ -532,7 +532,7 @@ def tolist(self):
if self.ndim > 1:
return [x.tolist() for x in self]
dtype = None if self._hasna else self._data.dtype
- return self.to_numpy(dtype=dtype, na_value=libmissing.NA).tolist()
+ return self.to_numpy(dtype=dtype, na_value=libmissing.NA).tolist() # type: ignore[return-value]

@overload
def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
@@ -1512,10 +1512,10 @@ def all(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
result = values.all(axis=axis)

if skipna:
- return result
+ return result # type: ignore[return-value]
else:
if not result or len(self) == 0 or not self._mask.any():
- return result
+ return result # type: ignore[return-value]
else:
return self.dtype.na_value

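The two ignores added to the all() reduction above are [return-value] errors: ndarray reductions such as values.all() produce numpy scalar types, and np.bool_ is not a subclass of the builtin bool, so returning it where a Python bool is declared is flagged by mypy (the declared return type in masked.py itself is not visible in this hunk). A stand-alone sketch of the same situation (the function below is illustrative, not from pandas):

    import numpy as np

    def all_true(values: np.ndarray) -> bool:
        result = values.all()
        # result is typed np.bool_; it behaves like a bool at runtime but is
        # not a bool subclass, hence the error-code-specific suppression.
        return result  # type: ignore[return-value]
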
2 changes: 1 addition & 1 deletion pandas/core/arrays/sparse/scipy_sparse.py
@@ -78,7 +78,7 @@ def _levels_to_axis(
ax_coords = codes[valid_ilocs]

ax_labels = ax_labels.tolist()
- return ax_coords, ax_labels
+ return ax_coords, ax_labels # pyright: ignore[reportReturnType]


def _to_ijv(
2 changes: 1 addition & 1 deletion pandas/core/arrays/timedeltas.py
@@ -321,7 +321,7 @@ def _unbox_scalar(self, value) -> np.timedelta64:
raise ValueError("'value' should be a Timedelta.")
self._check_compatible_with(value)
if value is NaT:
- return np.timedelta64(value._value, self.unit)
+ return np.timedelta64(value._value, self.unit) # type: ignore[call-overload]
else:
return value.as_unit(self.unit).asm8

2 changes: 1 addition & 1 deletion pandas/core/base.py
@@ -833,7 +833,7 @@ def tolist(self):
>>> idx.to_list()
[1, 2, 3]
"""
- return self._values.tolist()
+ return self._values.tolist() # type: ignore[return-value]

to_list = tolist

2 changes: 1 addition & 1 deletion pandas/core/groupby/generic.py
@@ -1666,7 +1666,7 @@ def _wrap_applied_output_series(

if stacked_values.dtype == object:
# We'll have the DataFrame constructor do inference
- stacked_values = stacked_values.tolist()
+ stacked_values = stacked_values.tolist() # type: ignore[assignment]
result = self.obj._constructor(stacked_values, index=index, columns=columns)

if not self.as_index:
8 changes: 4 additions & 4 deletions pandas/core/groupby/groupby.py
@@ -2095,7 +2095,7 @@ def _apply_filter(self, indices, dropna):
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
- mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
+ mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T # type: ignore[assignment]
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered

@@ -4549,11 +4549,11 @@ def blk_func(values: ArrayLike) -> ArrayLike:
)

if vals.ndim == 1:
- out = out.ravel("K")
+ out = out.ravel("K") # type: ignore[assignment]
if result_mask is not None:
- result_mask = result_mask.ravel("K")
+ result_mask = result_mask.ravel("K") # type: ignore[assignment]
else:
- out = out.reshape(ncols, ngroups * nqs)
+ out = out.reshape(ncols, ngroups * nqs) # type: ignore[assignment]

return post_processor(out, inference, result_mask, orig_vals)

4 changes: 2 additions & 2 deletions pandas/core/groupby/ops.py
@@ -1040,7 +1040,7 @@ def get_iterator(self, data: NDFrame, axis: AxisInt = 0):

length = len(data.axes[axis])

- start = 0
+ start: np.int64 | int = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not NaT:
yield label, slicer(start, edge)
@@ -1053,7 +1053,7 @@ def get_iterator(self, data: NDFrame, axis: AxisInt = 0):
def indices(self):
indices = collections.defaultdict(list)

- i = 0
+ i: np.int64 | int = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not NaT:
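
pandas/core/groupby/ops.py takes a different route from the other files: rather than suppressing the error, the loop variables are annotated up front as np.int64 | int, presumably because they start as the Python int 0 and are later re-bound to elements of numpy integer arrays (the re-binding itself falls outside the visible hunk). A stand-alone illustration of the same idea, with made-up data:

    import numpy as np

    bins = np.array([3, 7, 10], dtype=np.int64)

    # Declaring the union up front lets mypy accept both the int initializer
    # and the np.int64 elements re-bound onto the variable inside the loop,
    # with no ignore comment needed.
    start: np.int64 | int = 0
    for edge in bins:
        print(f"window [{start}, {edge})")
        start = edge
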
8 changes: 4 additions & 4 deletions pandas/core/indexers/objects.py
@@ -114,8 +114,8 @@ def get_window_bounds(
if closed in ["left", "neither"]:
end -= 1

- end = np.clip(end, 0, num_values)
- start = np.clip(start, 0, num_values)
+ end = np.clip(end, 0, num_values) # type: ignore[assignment]
+ start = np.clip(start, 0, num_values) # type: ignore[assignment]

return start, end

@@ -340,7 +340,7 @@ def get_window_bounds(
start = np.arange(0, num_values, step, dtype="int64")
end = start + self.window_size
if self.window_size:
- end = np.clip(end, 0, num_values)
+ end = np.clip(end, 0, num_values) # type: ignore[assignment]

return start, end

@@ -426,7 +426,7 @@ def get_window_bounds(
)
window_indices_start += len(indices)
# Extend as we'll be slicing window like [start, end)
- window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype(
+ window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype( # type: ignore[assignment]
np.int64, copy=False
)
start_arrays.append(window_indices.take(ensure_platform_int(start)))
9 changes: 1 addition & 8 deletions pandas/core/indexes/interval.py
@@ -1119,14 +1119,7 @@ def interval_range(
breaks = np.linspace(start, end, periods)
if all(is_integer(x) for x in com.not_none(start, end, freq)):
# np.linspace always produces float output
-
- # error: Argument 1 to "maybe_downcast_numeric" has incompatible type
- # "Union[ndarray[Any, Any], TimedeltaIndex, DatetimeIndex]";
- # expected "ndarray[Any, Any]" [
- breaks = maybe_downcast_numeric(
- breaks, # type: ignore[arg-type]
- np.dtype("int64"),
- )
+ breaks = maybe_downcast_numeric(breaks, np.dtype("int64")) # type: ignore[arg-type]
else:
# delegate to the appropriate range function
if isinstance(endpoint, Timestamp):
2 changes: 1 addition & 1 deletion pandas/core/internals/blocks.py
@@ -2635,7 +2635,7 @@ def _unstack(
self.values.take(
indices, allow_fill=needs_masking[i], fill_value=fill_value
),
- BlockPlacement(place),
+ BlockPlacement(place), # type: ignore[arg-type]
ndim=2,
)
for i, (indices, place) in enumerate(zip(new_values, new_placement))
2 changes: 1 addition & 1 deletion pandas/core/internals/construction.py
@@ -712,7 +712,7 @@ def reorder_arrays(
arr = np.empty(length, dtype=object)
arr.fill(np.nan)
else:
- arr = arrays[k]
+ arr = arrays[k] # type: ignore[assignment]
new_arrays.append(arr)

arrays = new_arrays
11 changes: 3 additions & 8 deletions pandas/core/missing.py
@@ -242,7 +242,8 @@ def find_valid_index(how: str, is_valid: npt.NDArray[np.bool_]) -> int | None:
return None

if is_valid.ndim == 2:
- is_valid = is_valid.any(axis=1) # reduce axis 1
+ # reduce axis 1
+ is_valid = is_valid.any(axis=1) # type: ignore[assignment]

if how == "first":
idxpos = is_valid[::].argmax()
@@ -401,13 +402,7 @@ def func(yvalues: np.ndarray) -> None:
**kwargs,
)

- # error: Argument 1 to "apply_along_axis" has incompatible type
- # "Callable[[ndarray[Any, Any]], None]"; expected "Callable[...,
- # Union[_SupportsArray[dtype[<nothing>]], Sequence[_SupportsArray
- # [dtype[<nothing>]]], Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]],
- # Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]],
- # Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]]]]]"
- np.apply_along_axis(func, axis, data) # type: ignore[arg-type]
+ np.apply_along_axis(func, axis, data)


def _index_to_interp_indices(index: Index, method: str) -> np.ndarray:
2 changes: 1 addition & 1 deletion pandas/core/reshape/encoding.py
@@ -360,7 -360,7 @@ def get_empty_frame(data) -> DataFrame:

if drop_first:
# remove first GH12042
- dummy_mat = dummy_mat[:, 1:]
+ dummy_mat = dummy_mat[:, 1:] # type: ignore[assignment]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols, dtype=_dtype)

4 changes: 1 addition & 3 deletions pandas/core/reshape/merge.py
@@ -2606,9 +2606,7 @@ def _convert_arrays_and_get_rizer_klass(
lk = lk.astype(dtype, copy=False)
rk = rk.astype(dtype, copy=False)
if isinstance(lk, BaseMaskedArray):
- # Invalid index type "type" for "Dict[Type[object], Type[Factorizer]]";
- # expected type "Type[object]"
- klass = _factorizers[lk.dtype.type] # type: ignore[index]
+ klass = _factorizers[lk.dtype.type]
elif isinstance(lk.dtype, ArrowDtype):
klass = _factorizers[lk.dtype.numpy_dtype.type]
else:
2 changes: 1 addition & 1 deletion pandas/core/sorting.py
@@ -477,7 +477,7 @@ def nargminmax(values: ExtensionArray, method: str, axis: AxisInt = 0):
zipped = zip(arr_values, mask)
else:
zipped = zip(arr_values.T, mask.T)
- return np.array([_nanargminmax(v, m, func) for v, m in zipped])
+ return np.array([_nanargminmax(v, m, func) for v, m in zipped]) # type: ignore[arg-type]
return func(arr_values, axis=axis)

return _nanargminmax(arr_values, mask, func)
2 changes: 1 addition & 1 deletion pandas/io/formats/format.py
@@ -1507,7 +1507,7 @@ def _format_strings(self) -> list[str]:
fmt_values = values._format_native_types(
na_rep=self.nat_rep, date_format=self.date_format
)
- return fmt_values.tolist()
+ return fmt_values.tolist() # type: ignore[return-value]


class _ExtensionArrayFormatter(_GenericArrayFormatter):
2 changes: 1 addition & 1 deletion pandas/io/parsers/python_parser.py
@@ -1299,7 +1299,7 @@ def detect_colspecs(
shifted[0] = 0
edges = np.where((mask ^ shifted) == 1)[0]
edge_pairs = list(zip(edges[::2], edges[1::2]))
- return edge_pairs
+ return edge_pairs # type: ignore[return-value]

def __next__(self) -> list[str]:
# Argument 1 to "next" has incompatible type "Union[IO[str],
2 changes: 1 addition & 1 deletion pandas/plotting/_matplotlib/style.py
@@ -242,7 +242,7 @@ def _random_color(column: int) -> list[float]:
"""Get a random color represented as a list of length 3"""
# GH17525 use common._random_state to avoid resetting the seed
rs = com.random_state(column)
- return rs.rand(3).tolist()
+ return rs.rand(3).tolist() # type: ignore[return-value]


def _is_single_string_color(color: Color) -> bool:
4 changes: 2 additions & 2 deletions pandas/tests/dtypes/test_missing.py
@@ -810,8 +810,8 @@ def test_empty_like(self):
np.datetime64("NaT"),
np.timedelta64("NaT"),
]
- + [np.datetime64("NaT", unit) for unit in m8_units]
- + [np.timedelta64("NaT", unit) for unit in m8_units]
+ + [np.datetime64("NaT", unit) for unit in m8_units] # type: ignore[call-overload]
+ + [np.timedelta64("NaT", unit) for unit in m8_units] # type: ignore[call-overload]
)

inf_vals = [
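
In the test change just above, the unit argument comes from iterating over a list of unit strings (the m8_units list defined earlier in the test, outside this hunk). The calls are valid at runtime, but the constructor overloads in the numpy 2.2 stubs apparently do not accept a plain str variable for the unit, hence the [call-overload] ignores. A stand-alone sketch of the same pattern with an assumed list of units:

    import numpy as np

    units = ["s", "ms", "us", "ns"]

    # Runs fine, but statically the unit parameter is matched against the
    # stubs' overloads, which a str loop variable fails to satisfy under
    # numpy 2.2.
    nat_values = [np.datetime64("NaT", unit) for unit in units]  # type: ignore[call-overload]
    print(nat_values)
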