diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 32d6279250fe2..1be2ec0dd92d7 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -190,7 +190,7 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
     _infer_matches = ("datetime", "datetime64", "date")
 
     # define my properties & methods for delegation
-    _bool_ops = [
+    _bool_ops: list[str] = [
         "is_month_start",
         "is_month_end",
         "is_quarter_start",
@@ -199,8 +199,8 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
         "is_year_end",
         "is_leap_year",
     ]
-    _object_ops = ["freq", "tz"]
-    _field_ops = [
+    _object_ops: list[str] = ["freq", "tz"]
+    _field_ops: list[str] = [
         "year",
         "month",
         "day",
@@ -220,9 +220,9 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
         "microsecond",
         "nanosecond",
     ]
-    _other_ops = ["date", "time", "timetz"]
-    _datetimelike_ops = _field_ops + _object_ops + _bool_ops + _other_ops
-    _datetimelike_methods = [
+    _other_ops: list[str] = ["date", "time", "timetz"]
+    _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
+    _datetimelike_methods: list[str] = [
         "to_period",
         "tz_localize",
         "tz_convert",
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index c256cc2e6a368..8d3a8feb89d67 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -87,7 +87,7 @@
 
 IntervalArrayT = TypeVar("IntervalArrayT", bound="IntervalArray")
 
-_interval_shared_docs = {}
+_interval_shared_docs: dict[str, str] = {}
 
 _shared_docs_kwargs = {
     "klass": "IntervalArray",
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 7234772466bd5..62829efb9dd8a 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -157,9 +157,9 @@ class PeriodArray(PeriodMixin, dtl.DatelikeOps):
 
     # Names others delegate to us
     _other_ops: list[str] = []
-    _bool_ops = ["is_leap_year"]
-    _object_ops = ["start_time", "end_time", "freq"]
-    _field_ops = [
+    _bool_ops: list[str] = ["is_leap_year"]
+    _object_ops: list[str] = ["start_time", "end_time", "freq"]
+    _field_ops: list[str] = [
         "year",
         "month",
         "day",
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2f1e8fa9a80a7..5e96eecd68604 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1797,9 +1797,9 @@ def _drop_labels_or_levels(self, keys, axis: int = 0):
 
         Parameters
         ----------
-        keys: str or list of str
+        keys : str or list of str
             labels or levels to drop
-        axis: int, default 0
+        axis : int, default 0
             Axis that levels are associated with (0 for index, 1 for columns)
 
         Returns
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 927eb8eed8454..50248d5af8883 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -3,8 +3,9 @@
 hold the allowlist of methods that are exposed on the
 SeriesGroupBy and the DataFrameGroupBy objects.
 """
+from __future__ import annotations
+
 import collections
-from typing import List
 
 from pandas._typing import final
 
@@ -19,7 +20,7 @@
 
 
 class ShallowMixin(PandasObject):
-    _attributes: List[str] = []
+    _attributes: list[str] = []
 
     @final
     def _shallow_copy(self, obj, **kwargs):
@@ -39,7 +40,7 @@ class GotItemMixin(PandasObject):
     Provide the groupby facilities to the mixed object.
""" - _attributes: List[str] + _attributes: list[str] @final def _gotitem(self, key, ndim, subset=None): @@ -106,12 +107,16 @@ def _gotitem(self, key, ndim, subset=None): | plotting_methods ) -series_apply_allowlist = ( +series_apply_allowlist: frozenset[str] = ( common_apply_allowlist - | {"nlargest", "nsmallest", "is_monotonic_increasing", "is_monotonic_decreasing"} + | frozenset( + {"nlargest", "nsmallest", "is_monotonic_increasing", "is_monotonic_decreasing"} + ) ) | frozenset(["dtype", "unique"]) -dataframe_apply_allowlist = common_apply_allowlist | frozenset(["dtypes", "corrwith"]) +dataframe_apply_allowlist: frozenset[str] = common_apply_allowlist | frozenset( + ["dtypes", "corrwith"] +) # cythonized transformations or canned "agg+broadcast", which do not # require postprocessing of the result by transform. diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a0727500ecc81..77770f6c79c8b 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -173,7 +173,7 @@ _unsortable_types = frozenset(("mixed", "mixed-integer")) -_index_doc_kwargs = { +_index_doc_kwargs: dict[str, str] = { "klass": "Index", "inplace": "", "target_klass": "Index", @@ -181,7 +181,7 @@ "unique": "Index", "duplicated": "np.ndarray", } -_index_shared_docs = {} +_index_shared_docs: dict[str, str] = {} str_t = str @@ -1189,7 +1189,7 @@ def _format_with_header( return header + result @final - def to_native_types(self, slicer=None, **kwargs): + def to_native_types(self, slicer=None, **kwargs) -> np.ndarray: """ Format specified values of `self` and return them. @@ -4390,7 +4390,7 @@ def memory_usage(self, deep: bool = False) -> int: return result @final - def where(self, cond, other=None): + def where(self, cond, other=None) -> Index: """ Replace values where the condition is False. @@ -4606,7 +4606,7 @@ def _can_hold_identifiers_and_holds_name(self, name) -> bool: return name in self return False - def append(self, other): + def append(self, other) -> Index: """ Append a collection of Index options together. @@ -4616,7 +4616,7 @@ def append(self, other): Returns ------- - appended : Index + Index """ to_concat = [self] @@ -4846,7 +4846,7 @@ def asof(self, label): loc = loc.indices(len(self))[-1] return self[loc] - def asof_locs(self, where: Index, mask) -> np.ndarray: + def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray: """ Return the locations (indices) of labels in the index. @@ -4863,13 +4863,13 @@ def asof_locs(self, where: Index, mask) -> np.ndarray: ---------- where : Index An Index consisting of an array of timestamps. - mask : array-like + mask : np.ndarray[bool] Array of booleans denoting where values in the original data are not NA. Returns ------- - numpy.ndarray + np.ndarray[np.intp] An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`. @@ -4877,7 +4877,7 @@ def asof_locs(self, where: Index, mask) -> np.ndarray: locs = self._values[mask].searchsorted(where._values, side="right") locs = np.where(locs > 0, locs - 1, 0) - result = np.arange(len(self))[mask].take(locs) + result = np.arange(len(self), dtype=np.intp)[mask].take(locs) # TODO: overload return type of ExtensionArray.__getitem__ first_value = cast(Any, self._values[mask.argmax()]) @@ -5050,7 +5050,7 @@ def argsort(self, *args, **kwargs) -> np.ndarray: Returns ------- - numpy.ndarray + np.ndarray[np.intp] Integer indices that would sort the index if used as an indexer. 
@@ -5838,7 +5838,7 @@ def delete(self, loc) -> Index:
         Returns
         -------
         Index
-            New Index with passed location(-s) deleted.
+            Will be same type as self, except for RangeIndex.
 
         See Also
         --------
@@ -6352,8 +6352,8 @@ def _maybe_cast_data_without_dtype(subarr):
 
     elif inferred == "interval":
         try:
-            data = IntervalArray._from_sequence(subarr, copy=False)
-            return data
+            ia_data = IntervalArray._from_sequence(subarr, copy=False)
+            return ia_data
         except (ValueError, TypeError):
             # GH27172: mixed closed Intervals --> object dtype
             pass
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index c9c39fde1da46..b5089621313b8 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -1,8 +1,8 @@
+from __future__ import annotations
+
 from typing import (
     Any,
     Hashable,
-    List,
-    Optional,
 )
 import warnings
 
@@ -50,7 +50,7 @@
     inherit_names,
 )
 
-_index_doc_kwargs = dict(ibase._index_doc_kwargs)
+_index_doc_kwargs: dict[str, str] = dict(ibase._index_doc_kwargs)
 _index_doc_kwargs.update({"target_klass": "CategoricalIndex"})
 
 
@@ -216,10 +216,10 @@ def __new__(
         data=None,
         categories=None,
         ordered=None,
-        dtype: Optional[Dtype] = None,
-        copy=False,
-        name=None,
-    ):
+        dtype: Dtype | None = None,
+        copy: bool = False,
+        name: Hashable = None,
+    ) -> CategoricalIndex:
 
         name = maybe_extract_name(name, data, cls)
 
@@ -239,7 +239,7 @@ def _shallow_copy(
         self,
         values: Categorical,
         name: Hashable = no_default,
-    ):
+    ) -> CategoricalIndex:
         name = self._name if name is no_default else name
 
         if values is not None:
@@ -349,7 +349,7 @@ def _format_attrs(self):
             attrs.append(("length", len(self)))
         return attrs
 
-    def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]:
+    def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
         from pandas.io.formats.printing import pprint_thing
 
         result = [
@@ -422,10 +422,9 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
 
         target = ibase.ensure_index(target)
 
-        missing: List[int]
         if self.equals(target):
             indexer = None
-            missing = []
+            missing = np.array([], dtype=np.intp)
         else:
             indexer, missing = self.get_indexer_non_unique(np.array(target))
 
@@ -494,8 +493,8 @@ def _maybe_cast_indexer(self, key) -> int:
     def _get_indexer(
         self,
         target: Index,
-        method: Optional[str] = None,
-        limit: Optional[int] = None,
+        method: str | None = None,
+        limit: int | None = None,
         tolerance=None,
     ) -> np.ndarray:
 
@@ -626,7 +625,7 @@ def map(self, mapper):
         mapped = self._values.map(mapper)
         return Index(mapped, name=self.name)
 
-    def _concat(self, to_concat: List[Index], name: Hashable) -> Index:
+    def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
         # if calling index is category, don't check dtype of others
         try:
             codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat])
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index b3714ff004978..9f02196466ebf 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -8,7 +8,10 @@
     tzinfo,
 )
 import operator
-from typing import TYPE_CHECKING
+from typing import (
+    TYPE_CHECKING,
+    Hashable,
+)
 import warnings
 
 import numpy as np
@@ -314,15 +317,15 @@ def __new__(
         data=None,
         freq=lib.no_default,
         tz=None,
-        normalize=False,
+        normalize: bool = False,
         closed=None,
         ambiguous="raise",
-        dayfirst=False,
-        yearfirst=False,
+        dayfirst: bool = False,
+        yearfirst: bool = False,
         dtype: Dtype | None = None,
-        copy=False,
-        name=None,
-    ):
+        copy: bool = False,
+        name: Hashable = None,
+    ) -> DatetimeIndex:
 
         if is_scalar(data):
             raise TypeError(
@@ -637,7 +640,7 @@ def _validate_partial_date_slice(self, reso: Resolution):
             # _parsed_string_to_bounds allows it.
             raise KeyError
 
-    def _deprecate_mismatched_indexing(self, key):
+    def _deprecate_mismatched_indexing(self, key) -> None:
         # GH#36148
         # we get here with isinstance(key, self._data._recognized_scalars)
         try:
@@ -851,7 +854,7 @@ def inferred_type(self) -> str:
         # sure we can't have ambiguous indexing
         return "datetime64"
 
-    def indexer_at_time(self, time, asof=False):
+    def indexer_at_time(self, time, asof: bool = False) -> np.ndarray:
         """
         Return index locations of values at particular time of day
         (e.g. 9:30AM).
@@ -865,7 +868,7 @@ def indexer_at_time(self, time, asof=False):
 
         Returns
         -------
-        values_at_time : array of integers
+        np.ndarray[np.intp]
 
         See Also
         --------
@@ -891,8 +894,8 @@ def indexer_at_time(self, time, asof=False):
         return (micros == time_micros).nonzero()[0]
 
     def indexer_between_time(
-        self, start_time, end_time, include_start=True, include_end=True
-    ):
+        self, start_time, end_time, include_start: bool = True, include_end: bool = True
+    ) -> np.ndarray:
         """
         Return index locations of values between particular times of day
         (e.g., 9:00-9:30AM).
@@ -908,7 +911,7 @@ def indexer_between_time(
 
         Returns
         -------
-        values_between_time : array of integers
+        np.ndarray[np.intp]
 
         See Also
         --------
@@ -948,8 +951,8 @@ def date_range(
     periods=None,
     freq=None,
     tz=None,
-    normalize=False,
-    name=None,
+    normalize: bool = False,
+    name: Hashable = None,
     closed=None,
     **kwargs,
 ) -> DatetimeIndex:
@@ -1120,8 +1123,8 @@ def bdate_range(
     periods: int | None = None,
     freq="B",
     tz=None,
-    normalize=True,
-    name=None,
+    normalize: bool = True,
+    name: Hashable = None,
     weekmask=None,
     holidays=None,
     closed=None,
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index 76c16dee1cda1..b11ec06120e0c 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -351,7 +351,7 @@ def map(self, mapper, na_action=None):
             return self.astype(object).map(mapper)
 
     @doc(Index.astype)
-    def astype(self, dtype, copy=True):
+    def astype(self, dtype, copy: bool = True) -> Index:
         dtype = pandas_dtype(dtype)
         if is_dtype_equal(self.dtype, dtype):
             if not copy:
@@ -410,7 +410,7 @@ def _simple_new(
     def _get_engine_target(self) -> np.ndarray:
         return self._data._ndarray
 
-    def insert(self: _T, loc: int, item) -> _T:
+    def insert(self: _T, loc: int, item) -> Index:
         """
         Make new Index inserting new item at location. Follows
         Python list.append semantics for negative values.
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index cd85efaba1d33..f7ab09e4f176f 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -283,7 +283,7 @@ def __new__(
         copy: bool = False,
         name: Hashable = None,
         verify_integrity: bool = True,
-    ):
+    ) -> IntervalIndex:
 
         name = maybe_extract_name(name, data, cls)
 
@@ -318,10 +318,10 @@ def from_breaks(
         cls,
         breaks,
         closed: str = "right",
-        name=None,
+        name: Hashable = None,
         copy: bool = False,
         dtype: Dtype | None = None,
-    ):
+    ) -> IntervalIndex:
         with rewrite_exception("IntervalArray", cls.__name__):
             array = IntervalArray.from_breaks(
                 breaks, closed=closed, copy=copy, dtype=dtype
@@ -349,10 +349,10 @@ def from_arrays(
         left,
         right,
         closed: str = "right",
-        name=None,
+        name: Hashable = None,
         copy: bool = False,
         dtype: Dtype | None = None,
-    ):
+    ) -> IntervalIndex:
         with rewrite_exception("IntervalArray", cls.__name__):
             array = IntervalArray.from_arrays(
                 left, right, closed, copy=copy, dtype=dtype
@@ -379,10 +379,10 @@ def from_tuples(
         cls,
         data,
         closed: str = "right",
-        name=None,
+        name: Hashable = None,
         copy: bool = False,
         dtype: Dtype | None = None,
-    ):
+    ) -> IntervalIndex:
         with rewrite_exception("IntervalArray", cls.__name__):
             arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
         return cls._simple_new(arr, name=name)
@@ -878,7 +878,7 @@ def putmask(self, mask, value) -> Index:
         arr.putmask(mask, value)
         return type(self)._simple_new(arr, name=self.name)
 
-    def insert(self, loc, item):
+    def insert(self, loc: int, item):
         """
         Return a new IntervalIndex inserting new item at location. Follows
         Python list.append semantics for negative values.  Only Interval
@@ -1077,8 +1077,8 @@ def _is_type_compatible(a, b) -> bool:
 
 
 def interval_range(
-    start=None, end=None, periods=None, freq=None, name=None, closed="right"
-):
+    start=None, end=None, periods=None, freq=None, name: Hashable = None, closed="right"
+) -> IntervalIndex:
     """
     Return a fixed frequency IntervalIndex.
 
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index a32585c3bed13..18e441ef165c9 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -4,7 +4,10 @@
     datetime,
     timedelta,
 )
-from typing import Any
+from typing import (
+    Any,
+    Hashable,
+)
 import warnings
 
 import numpy as np
@@ -215,10 +218,10 @@ def __new__(
         ordinal=None,
         freq=None,
         dtype: Dtype | None = None,
-        copy=False,
-        name=None,
+        copy: bool = False,
+        name: Hashable = None,
         **fields,
-    ):
+    ) -> PeriodIndex:
 
         valid_field_set = {
             "year",
@@ -322,7 +325,7 @@ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
     # ------------------------------------------------------------------------
     # Rendering Methods
 
-    def _mpl_repr(self):
+    def _mpl_repr(self) -> np.ndarray:
         # how to represent ourselves to matplotlib
         return self.astype(object)._values
 
@@ -386,7 +389,8 @@ def __array_wrap__(self, result, context=None):
     def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray:
         """
         where : array of timestamps
-        mask : array of booleans where data is not NA
+        mask : np.ndarray[bool]
+            Array of booleans where data is not NA.
""" if isinstance(where, DatetimeIndex): where = PeriodIndex(where._values, freq=self.freq) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 56bd11056b380..8e8c67927c20f 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -106,9 +106,9 @@ def __new__( stop=None, step=None, dtype: Dtype | None = None, - copy=False, - name=None, - ): + copy: bool = False, + name: Hashable = None, + ) -> RangeIndex: # error: Argument 1 to "_validate_dtype" of "NumericIndex" has incompatible type # "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], Type[int], @@ -584,7 +584,7 @@ def _intersection(self, other: Index, sort=False): # solve intersection problem # performance hint: for identical step sizes, could use # cheaper alternative - gcd, s, t = self._extended_gcd(first.step, second.step) + gcd, s, _ = self._extended_gcd(first.step, second.step) # check whether element sets intersect if (first.start - second.start) % gcd: @@ -619,7 +619,7 @@ def _max_fitting_element(self, upper_limit: int) -> int: no_steps = (upper_limit - self.start) // abs(self.step) return self.start + abs(self.step) * no_steps - def _extended_gcd(self, a, b): + def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]: """ Extended Euclidean algorithms to solve Bezout's identity: a*x + b*y = gcd(x, y) @@ -745,7 +745,7 @@ def _difference(self, other, sort=None): new_index = new_index[::-1] return new_index - def symmetric_difference(self, other, result_name=None, sort=None): + def symmetric_difference(self, other, result_name: Hashable = None, sort=None): if not isinstance(other, RangeIndex) or sort is not None: return super().symmetric_difference(other, result_name, sort) @@ -759,7 +759,7 @@ def symmetric_difference(self, other, result_name=None, sort=None): # -------------------------------------------------------------------- - def _concat(self, indexes, name): + def _concat(self, indexes, name: Hashable): """ Overriding parent method for the case of all RangeIndex instances. diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e1ba532e6a5f1..fb542711329b4 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1066,7 +1066,7 @@ def _getitem_iterable(self, key, axis: int): ---------- key : iterable Targeted labels. - axis: int + axis : int Dimension on which the indexing is being made. Raises @@ -1266,7 +1266,7 @@ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False): ---------- key : list-like Targeted labels. - axis: int + axis: int Dimension on which the indexing is being made. raise_missing: bool, default False Whether to raise a KeyError if some labels were not found. @@ -1321,7 +1321,7 @@ def _validate_read_indexer( indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). - axis: int + axis : int Dimension on which the indexing is being made. raise_missing: bool Whether to raise a KeyError if some labels are not found. 
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 43a61ed799e8f..54588eafc3fa0 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -504,7 +504,7 @@ def nanall(
     Parameters
     ----------
     values : ndarray
-    axis: int, optional
+    axis : int, optional
     skipna : bool, default True
     mask : ndarray[bool], optional
         nan-mask if known
@@ -547,7 +547,7 @@ def nansum(
     Parameters
     ----------
     values : ndarray[dtype]
-    axis: int, optional
+    axis : int, optional
     skipna : bool, default True
     min_count: int, default 0
     mask : ndarray[bool], optional
@@ -622,7 +622,7 @@ def nanmean(
     Parameters
     ----------
     values : ndarray
-    axis: int, optional
+    axis : int, optional
     skipna : bool, default True
     mask : ndarray[bool], optional
         nan-mask if known
@@ -678,7 +678,7 @@ def nanmedian(values, *, axis=None, skipna=True, mask=None):
     Parameters
     ----------
     values : ndarray
-    axis: int, optional
+    axis : int, optional
     skipna : bool, default True
     mask : ndarray[bool], optional
         nan-mask if known
@@ -835,7 +835,7 @@ def nanstd(values, *, axis=None, skipna=True, ddof=1, mask=None):
     Parameters
     ----------
     values : ndarray
-    axis: int, optional
+    axis : int, optional
     skipna : bool, default True
     ddof : int, default 1
         Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
@@ -875,7 +875,7 @@ def nanvar(values, *, axis=None, skipna=True, ddof=1, mask=None):
     Parameters
     ----------
     values : ndarray
-    axis: int, optional
+    axis : int, optional
     skipna : bool, default True
     ddof : int, default 1
         Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
@@ -950,7 +950,7 @@ def nansem(
     Parameters
     ----------
     values : ndarray
-    axis: int, optional
+    axis : int, optional
     skipna : bool, default True
     ddof : int, default 1
         Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
@@ -1031,7 +1031,7 @@ def nanargmax(
     Parameters
     ----------
     values : ndarray
-    axis: int, optional
+    axis : int, optional
     skipna : bool, default True
     mask : ndarray[bool], optional
         nan-mask if known
@@ -1077,7 +1077,7 @@ def nanargmin(
     Parameters
    ----------
     values : ndarray
-    axis: int, optional
+    axis : int, optional
     skipna : bool, default True
     mask : ndarray[bool], optional
         nan-mask if known
@@ -1129,7 +1129,7 @@ def nanskew(
     Parameters
     ----------
     values : ndarray
-    axis: int, optional
+    axis : int, optional
     skipna : bool, default True
     mask : ndarray[bool], optional
         nan-mask if known
@@ -1216,7 +1216,7 @@ def nankurt(
     Parameters
     ----------
     values : ndarray
-    axis: int, optional
+    axis : int, optional
     skipna : bool, default True
     mask : ndarray[bool], optional
         nan-mask if known
@@ -1307,7 +1307,7 @@ def nanprod(
     Parameters
     ----------
     values : ndarray[dtype]
-    axis: int, optional
+    axis : int, optional
     skipna : bool, default True
     min_count: int, default 0
     mask : ndarray[bool], optional
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 689f27a25f11b..f6bde348888a1 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -212,8 +212,8 @@ def align_method_FRAME(
     ----------
     left : DataFrame
     right : Any
-    axis: int, str, or None
-    flex: bool or None, default False
+    axis : int, str, or None
+    flex : bool or None, default False
         Whether this is a flex op, in which case we reindex.
         None indicates not to check for alignment.
     level : int or level name, default None
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index ec7f66f1e515e..e2ff09705f4cb 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -409,7 +409,7 @@ def nargminmax(values, method: str, axis: int = 0):
     ----------
     values : ExtensionArray
     method : {"argmax", "argmin"}
-    axis: int, default 0
+    axis : int, default 0
 
     Returns
     -------
diff --git a/pandas/tests/reshape/concat/test_append.py b/pandas/tests/reshape/concat/test_append.py
index 1c533ec9fb1e3..62fe1ed3a7c49 100644
--- a/pandas/tests/reshape/concat/test_append.py
+++ b/pandas/tests/reshape/concat/test_append.py
@@ -190,7 +190,10 @@ def test_append_preserve_index_name(self):
         pd.MultiIndex.from_arrays(["A B C".split(), "D E F".split()])
     ]
 
-    all_indexes = indexes_can_append + indexes_cannot_append_with_other
+    # error: Unsupported operand types for + ("List[Index]" and "List[MultiIndex]")
+    all_indexes = (
+        indexes_can_append + indexes_cannot_append_with_other  # type: ignore[operator]
+    )
 
     @pytest.mark.parametrize("index", all_indexes, ids=lambda x: type(x).__name__)
     def test_append_same_columns_type(self, index):
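For context (not part of the patch): the hunks above repeatedly replace `typing.List`/`typing.Optional` with the built-in generics of PEP 585 (`list[str]`, `dict[str, str]`, `frozenset[str]`) and the PEP 604 unions (`Dtype | None`). On interpreters older than 3.9/3.10 those spellings are only valid inside annotations when `from __future__ import annotations` defers their evaluation, which is presumably why the diff adds that import to modules that did not already have it. A minimal standalone sketch of the same pattern (the class and names below are illustrative, not pandas code):

```python
from __future__ import annotations  # defer annotation evaluation so list[str] / X | None work on 3.7+

from typing import Hashable


class Delegator:
    # class-level registries annotated with built-in generics, mirroring DatetimeArray._bool_ops etc.
    _bool_ops: list[str] = ["is_leap_year"]
    _field_ops: list[str] = ["year", "month", "day"]
    _datetimelike_ops: list[str] = _bool_ops + _field_ops

    def lookup(self, name: Hashable = None, copy: bool = False) -> str | None:
        # PEP 604 union in the return annotation instead of Optional[str]
        if name in self._datetimelike_ops:
            return str(name)
        return None


if __name__ == "__main__":
    d = Delegator()
    print(d.lookup("year"))  # prints "year"
    print(d.lookup("hour"))  # prints None
```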