STY: remove --keep-runtime-typing from pyupgrade Part-2 #40802

Merged
merged 1 commit on Apr 6, 2021
4 changes: 2 additions & 2 deletions pandas/_typing.py
@@ -25,7 +25,7 @@
Optional,
Sequence,
Tuple,
Type,
Type as type_t,
TypeVar,
Union,
)
@@ -119,7 +119,7 @@
# dtypes
NpDtype = Union[str, np.dtype]
Dtype = Union[
"ExtensionDtype", NpDtype, Type[Union[str, float, int, complex, bool, object]]
"ExtensionDtype", NpDtype, type_t[Union[str, float, int, complex, bool, object]]
]
# DtypeArg specifies all allowable dtypes in a functions its dtype argument
DtypeArg = Union[Dtype, Dict[Hashable, Dtype]]
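
A note on the Type as type_t alias above (the diff does not state the rationale, so this is a reading, not a quote): Dtype and DtypeArg are ordinary module-level assignments, so their right-hand sides run at import time and cannot hide behind from __future__ import annotations. Keeping typing.Type under a renamed import avoids the builtin type[...], which is not subscriptable at runtime before Python 3.9, while the annotation-only sites elsewhere in the PR can safely move to the new syntax. A minimal sketch of the distinction, with hypothetical names:

# Sketch: runtime-evaluated alias vs. lazily evaluated annotation
# (assumes Python 3.7+; the names are illustrative, not pandas APIs).
from __future__ import annotations

from typing import Type as type_t, Union

# Evaluated at import time: must stay on typing.Type (aliased as type_t),
# because the builtin type[...] raises TypeError before Python 3.9.
NumberType = Union[type_t[int], type_t[float]]

# Annotation only: stored as a string thanks to the __future__ import,
# so the new-style syntax below is never executed on 3.7/3.8.
def coerce(cls: type[int] | type[float], value: str) -> int | float:
    return cls(value)

print(coerce(int, "3"))  # 3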
25 changes: 10 additions & 15 deletions pandas/core/arrays/categorical.py
@@ -6,13 +6,8 @@
from shutil import get_terminal_size
from typing import (
TYPE_CHECKING,
Dict,
Hashable,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
@@ -359,7 +354,7 @@ def __init__(
values,
categories=None,
ordered=None,
dtype: Optional[Dtype] = None,
dtype: Dtype | None = None,
fastpath=False,
copy: bool = True,
):
@@ -473,11 +468,11 @@ def dtype(self) -> CategoricalDtype:
return self._dtype

@property
def _constructor(self) -> Type[Categorical]:
def _constructor(self) -> type[Categorical]:
return Categorical

@classmethod
def _from_sequence(cls, scalars, *, dtype: Optional[Dtype] = None, copy=False):
def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False):
return Categorical(scalars, dtype=dtype, copy=copy)

def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
@@ -547,7 +542,7 @@ def itemsize(self) -> int:
"""
return self.categories.itemsize

def tolist(self) -> List[Scalar]:
def tolist(self) -> list[Scalar]:
"""
Return a list of the values.

@@ -630,7 +625,7 @@ def _from_inferred_categories(

@classmethod
def from_codes(
cls, codes, categories=None, ordered=None, dtype: Optional[Dtype] = None
cls, codes, categories=None, ordered=None, dtype: Dtype | None = None
):
"""
Make a Categorical type from codes and categories or dtype.
@@ -1369,7 +1364,7 @@ def _validate_fill_value(self, fill_value):

# -------------------------------------------------------------

def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
"""
The numpy array interface.

@@ -1960,7 +1955,7 @@ def _validate_setitem_value(self, value):
codes = self.categories.get_indexer(rvalue)
return codes.astype(self._ndarray.dtype, copy=False)

def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
def _reverse_indexer(self) -> dict[Hashable, np.ndarray]:
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
@@ -2187,7 +2182,7 @@ def equals(self, other: object) -> bool:

@classmethod
def _concat_same_type(
cls: Type[CategoricalT], to_concat: Sequence[CategoricalT], axis: int = 0
cls: type[CategoricalT], to_concat: Sequence[CategoricalT], axis: int = 0
) -> CategoricalT:
from pandas.core.dtypes.concat import union_categoricals

@@ -2643,7 +2638,7 @@ def recode_for_categories(
return new_codes


def factorize_from_iterable(values) -> Tuple[np.ndarray, Index]:
def factorize_from_iterable(values) -> tuple[np.ndarray, Index]:
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
@@ -2682,7 +2677,7 @@ def factorize_from_iterable(values) -> Tuple[np.ndarray, Index]:
return codes, categories


def factorize_from_iterables(iterables) -> Tuple[List[np.ndarray], List[Index]]:
def factorize_from_iterables(iterables) -> tuple[list[np.ndarray], list[Index]]:
"""
A higher-level wrapper over `factorize_from_iterable`.

61 changes: 29 additions & 32 deletions pandas/core/arrays/datetimelike.py
@@ -9,10 +9,7 @@
TYPE_CHECKING,
Any,
Callable,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
@@ -156,25 +153,25 @@ class DatetimeLikeArrayMixin(OpsMixin, NDArrayBackedExtensionArray):
"""

# _infer_matches -> which infer_dtype strings are close enough to our own
_infer_matches: Tuple[str, ...]
_infer_matches: tuple[str, ...]
_is_recognized_dtype: Callable[[DtypeObj], bool]
_recognized_scalars: Tuple[Type, ...]
_recognized_scalars: tuple[type, ...]
_ndarray: np.ndarray

def __init__(self, data, dtype: Optional[Dtype] = None, freq=None, copy=False):
def __init__(self, data, dtype: Dtype | None = None, freq=None, copy=False):
raise AbstractMethodError(self)

@classmethod
def _simple_new(
cls: Type[DatetimeLikeArrayT],
cls: type[DatetimeLikeArrayT],
values: np.ndarray,
freq: Optional[BaseOffset] = None,
dtype: Optional[Dtype] = None,
freq: BaseOffset | None = None,
dtype: Dtype | None = None,
) -> DatetimeLikeArrayT:
raise AbstractMethodError(cls)

@property
def _scalar_type(self) -> Type[DatetimeLikeScalar]:
def _scalar_type(self) -> type[DatetimeLikeScalar]:
"""
The scalar associated with this datelike
@@ -206,7 +203,7 @@ def _scalar_from_string(self, value: str) -> DTScalarOrNaT:

def _unbox_scalar(
self, value: DTScalarOrNaT, setitem: bool = False
) -> Union[np.int64, np.datetime64, np.timedelta64]:
) -> np.int64 | np.datetime64 | np.timedelta64:
"""
Unbox the integer value of a scalar `value`.
@@ -334,15 +331,15 @@ def _formatter(self, boxed: bool = False):
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods

def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
# used for Timedelta/DatetimeArray, overwritten by PeriodArray
if is_object_dtype(dtype):
return np.array(list(self), dtype=object)
return self._ndarray

def __getitem__(
self, key: Union[int, slice, np.ndarray]
) -> Union[DatetimeLikeArrayMixin, DTScalarOrNaT]:
self, key: int | slice | np.ndarray
) -> DatetimeLikeArrayMixin | DTScalarOrNaT:
"""
This getitem defers to the underlying array, which by-definition can
only handle list-likes, slices, and integer scalars
@@ -354,7 +351,7 @@ def __getitem__(
result._freq = self._get_getitem_freq(key)
return result

def _get_getitem_freq(self, key) -> Optional[BaseOffset]:
def _get_getitem_freq(self, key) -> BaseOffset | None:
"""
Find the `freq` attribute to assign to the result of a __getitem__ lookup.
"""
@@ -386,8 +383,8 @@ def _get_getitem_freq(self, key) -> Optional[BaseOffset]:
# ndarray]"
def __setitem__( # type: ignore[override]
self,
key: Union[int, Sequence[int], Sequence[bool], slice],
value: Union[NaTType, Any, Sequence[Any]],
key: int | Sequence[int] | Sequence[bool] | slice,
value: NaTType | Any | Sequence[Any],
) -> None:
# I'm fudging the types a bit here. "Any" above really depends
# on type(self). For PeriodArray, it's Period (or stuff coercible
@@ -469,10 +466,10 @@ def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray:
...

@overload
def view(self, dtype: Optional[Dtype] = ...) -> ArrayLike:
def view(self, dtype: Dtype | None = ...) -> ArrayLike:
...

def view(self, dtype: Optional[Dtype] = None) -> ArrayLike:
def view(self, dtype: Dtype | None = None) -> ArrayLike:
# We handle datetime64, datetime64tz, timedelta64, and period
# dtypes here. Everything else we pass through to the underlying
# ndarray.
@@ -509,7 +506,7 @@ def view(self, dtype: Optional[Dtype] = None) -> ArrayLike:

@classmethod
def _concat_same_type(
cls: Type[DatetimeLikeArrayT],
cls: type[DatetimeLikeArrayT],
to_concat: Sequence[DatetimeLikeArrayT],
axis: int = 0,
) -> DatetimeLikeArrayT:
@@ -545,7 +542,7 @@ def _values_for_factorize(self):

@classmethod
def _from_factorized(
cls: Type[DatetimeLikeArrayT], values, original: DatetimeLikeArrayT
cls: type[DatetimeLikeArrayT], values, original: DatetimeLikeArrayT
) -> DatetimeLikeArrayT:
return cls(values, dtype=original.dtype)

@@ -789,7 +786,7 @@ def _validate_setitem_value(self, value):

def _unbox(
self, other, setitem: bool = False
) -> Union[np.int64, np.datetime64, np.timedelta64, np.ndarray]:
) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray:
"""
Unbox either a scalar with _unbox_scalar or an instance of our own type.
"""
@@ -939,7 +936,7 @@ def freq(self, value):
self._freq = value

@property
def freqstr(self) -> Optional[str]:
def freqstr(self) -> str | None:
"""
Return the frequency object as a string if its set, otherwise None.
"""
@@ -948,7 +945,7 @@ def freqstr(self) -> Optional[str]:
return self.freq.freqstr

@property # NB: override with cache_readonly in immutable subclasses
def inferred_freq(self) -> Optional[str]:
def inferred_freq(self) -> str | None:
"""
Tries to return a string representing a frequency guess,
generated by infer_freq. Returns None if it can't autodetect the
@@ -962,7 +959,7 @@ def inferred_freq(self) -> Optional[str]:
return None

@property # NB: override with cache_readonly in immutable subclasses
def _resolution_obj(self) -> Optional[Resolution]:
def _resolution_obj(self) -> Resolution | None:
freqstr = self.freqstr
if freqstr is None:
return None
@@ -1020,7 +1017,7 @@ def _validate_frequency(cls, index, freq, **kwargs):

@classmethod
def _generate_range(
cls: Type[DatetimeLikeArrayT], start, end, periods, freq, *args, **kwargs
cls: type[DatetimeLikeArrayT], start, end, periods, freq, *args, **kwargs
) -> DatetimeLikeArrayT:
raise AbstractMethodError(cls)

@@ -1443,7 +1440,7 @@ def __isub__(self, other):
# --------------------------------------------------------------
# Reductions

def min(self, *, axis: Optional[int] = None, skipna: bool = True, **kwargs):
def min(self, *, axis: int | None = None, skipna: bool = True, **kwargs):
"""
Return the minimum value of the Array or minimum along
an axis.
@@ -1472,7 +1469,7 @@ def min(self, *, axis: Optional[int] = None, skipna: bool = True, **kwargs):
result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)

def max(self, *, axis: Optional[int] = None, skipna: bool = True, **kwargs):
def max(self, *, axis: int | None = None, skipna: bool = True, **kwargs):
"""
Return the maximum value of the Array or maximum along
an axis.
@@ -1503,7 +1500,7 @@ def max(self, *, axis: Optional[int] = None, skipna: bool = True, **kwargs):
result = nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)

def mean(self, *, skipna: bool = True, axis: Optional[int] = 0):
def mean(self, *, skipna: bool = True, axis: int | None = 0):
"""
Return the mean value of the Array.
@@ -1542,7 +1539,7 @@ def mean(self, *, skipna: bool = True, axis: Optional[int] = 0):
)
return self._wrap_reduction_result(axis, result)

def median(self, *, axis: Optional[int] = None, skipna: bool = True, **kwargs):
def median(self, *, axis: int | None = None, skipna: bool = True, **kwargs):
nv.validate_median((), kwargs)

if axis is not None and abs(axis) >= self.ndim:
@@ -1752,7 +1749,7 @@ def ceil(self, freq, ambiguous="raise", nonexistent="raise"):
# --------------------------------------------------------------
# Reductions

def any(self, *, axis: Optional[int] = None, skipna: bool = True):
def any(self, *, axis: int | None = None, skipna: bool = True):
# GH#34479 discussion of desired behavior long-term
return nanops.nanany(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())

def all(self, *, axis: Optional[int] = None, skipna: bool = True):
def all(self, *, axis: int | None = None, skipna: bool = True):
# GH#34479 discussion of desired behavior long-term
return nanops.nanall(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())
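
The view overloads earlier in this file show the same rewrite applied to @overload stubs, which are likewise pure annotations and therefore safe under postponed evaluation. A rough sketch of that shape, with stand-in types rather than the real pandas signatures:

# Sketch of an overloaded view()-style method with new-style unions
# (assumes Python 3.7+ and numpy; Wrapper is a made-up class).
from __future__ import annotations

from typing import overload

import numpy as np

class Wrapper:
    def __init__(self, data: np.ndarray) -> None:
        self._ndarray = data

    @overload
    def view(self, dtype: None = ...) -> Wrapper:
        ...

    @overload
    def view(self, dtype: str | np.dtype) -> np.ndarray:
        ...

    def view(self, dtype: str | np.dtype | None = None) -> Wrapper | np.ndarray:
        # The overloads above exist only for type checkers; this runtime
        # implementation handles both cases.
        if dtype is None:
            return Wrapper(self._ndarray)
        return self._ndarray.view(dtype)

w = Wrapper(np.arange(3, dtype="int64"))
print(w.view("int32").dtype)  # int32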
