
STY: remove --keep-runtime-typing from pyupgrade Part-1 #40773


Merged
merged 1 commit into from Apr 6, 2021
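For context: pyupgrade's --keep-runtime-typing option prevents it from rewriting typing.List, typing.Optional, and friends to the newer spellings. With the option removed, pyupgrade applies PEP 585 builtin generics (list[str]) and PEP 604 unions (str | None) throughout. On the Python 3.7/3.8 interpreters pandas still supported at the time, that syntax is legal only inside annotations, and only in modules that defer annotation evaluation via PEP 563. A minimal sketch of the pattern (illustrative only, not code from this PR):

# The future import defers annotation evaluation (PEP 563), so the
# PEP 585/604 syntax below is never executed at runtime and works on
# Python 3.7/3.8 as well as newer interpreters.
from __future__ import annotations


# Before pyupgrade: def head(rows: List[str], n: Optional[int] = None) -> List[str]:
def head(rows: list[str], n: int | None = None) -> list[str]:
    """Return the first n rows, or all rows when n is None."""
    return rows if n is None else rows[:n]


print(head(["a", "b", "c"], n=2))  # prints ['a', 'b']

The same mechanical rewrite repeats in every file below: typing imports shrink, and annotations switch to the builtin spellings.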
34 changes: 16 additions & 18 deletions pandas/_testing/__init__.py
@@ -14,8 +14,6 @@
     ContextManager,
     Counter,
     Iterable,
-    List,
-    Type,
 )
 import warnings
@@ -116,24 +114,24 @@
 _N = 30
 _K = 4
 
-UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
-UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
-SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
-SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
+UNSIGNED_INT_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
+UNSIGNED_EA_INT_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
+SIGNED_INT_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
+SIGNED_EA_INT_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
 ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
 ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
 
-FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
-FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
-COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
-STRING_DTYPES: List[Dtype] = [str, "str", "U"]
+FLOAT_DTYPES: list[Dtype] = [float, "float32", "float64"]
+FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
+COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
+STRING_DTYPES: list[Dtype] = [str, "str", "U"]
 
-DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
-TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
+DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
+TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
 
-BOOL_DTYPES: List[Dtype] = [bool, "bool"]
-BYTES_DTYPES: List[Dtype] = [bytes, "bytes"]
-OBJECT_DTYPES: List[Dtype] = [object, "object"]
+BOOL_DTYPES: list[Dtype] = [bool, "bool"]
+BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
+OBJECT_DTYPES: list[Dtype] = [object, "object"]
 
 ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
 ALL_NUMPY_DTYPES = (
@@ -428,7 +426,7 @@ def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
     ----------
     k: length of each of the index instances
     """
-    make_index_funcs: List[Callable[..., Index]] = [
+    make_index_funcs: list[Callable[..., Index]] = [
         makeDateIndex,
         makePeriodIndex,
         makeTimedeltaIndex,
@@ -876,7 +874,7 @@ def skipna_wrapper(x):
     return skipna_wrapper
 
 
-def convert_rows_list_to_csv_str(rows_list: List[str]):
+def convert_rows_list_to_csv_str(rows_list: list[str]):
     """
     Convert list of CSV rows to single CSV-formatted string for current OS.
@@ -896,7 +894,7 @@ def convert_rows_list_to_csv_str(rows_list: List[str]):
     return sep.join(rows_list) + sep
 
 
-def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
+def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
     """
     Helper function to mark pytest.raises that have an external error message.
4 changes: 2 additions & 2 deletions pandas/_typing.py
@@ -25,7 +25,7 @@
     Optional,
     Sequence,
     Tuple,
-    Type,
+    Type as type_t,
     TypeVar,
     Union,
 )
@@ -119,7 +119,7 @@
 # dtypes
 NpDtype = Union[str, np.dtype]
 Dtype = Union[
-    "ExtensionDtype", NpDtype, Type[Union[str, float, int, complex, bool, object]]
+    "ExtensionDtype", NpDtype, type_t[Union[str, float, int, complex, bool, object]]
 ]
 # DtypeArg specifies all allowable dtypes in a functions its dtype argument
 DtypeArg = Union[Dtype, Dict[Hashable, Dtype]]
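Note that pandas/_typing.py is only renamed here, not rewritten: Dtype is a runtime assignment rather than an annotation, so PEP 563 deferral does not apply to it, and the builtin type is not subscriptable before Python 3.9. Aliasing Type as type_t keeps a runtime-subscriptable form (and keeps pyupgrade from touching the Type[...] spelling). A small sketch of the constraint, assuming Python 3.8 semantics (illustrative, not pandas code):

# This alias is used in assignments evaluated at import time, so
# deferred annotations (PEP 563) cannot help; the typing form must
# stay until the minimum supported Python reaches 3.9.
from typing import Type as type_t, Union

StrOrStrClass = Union[str, type_t[str]]  # works on Python 3.7+

# The builtin spelling would fail at import time before Python 3.9:
#   Union[str, type[str]]  ->  TypeError: 'type' object is not subscriptable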
7 changes: 2 additions & 5 deletions pandas/compat/pickle_compat.py
@@ -7,10 +7,7 @@
 import copy
 import io
 import pickle as pkl
-from typing import (
-    TYPE_CHECKING,
-    Optional,
-)
+from typing import TYPE_CHECKING
 import warnings
 
 from pandas._libs.tslibs import BaseOffset
@@ -235,7 +232,7 @@ def load_newobj_ex(self):
     pass
 
 
-def load(fh, encoding: Optional[str] = None, is_verbose: bool = False):
+def load(fh, encoding: str | None = None, is_verbose: bool = False):
     """
     Load a pickle, with a provided encoding,
27 changes: 11 additions & 16 deletions pandas/core/aggregation.py
@@ -12,14 +12,9 @@
     Any,
     Callable,
     DefaultDict,
-    Dict,
     Hashable,
     Iterable,
-    List,
-    Optional,
     Sequence,
-    Tuple,
-    Union,
 )
 
 from pandas._typing import (
@@ -42,8 +37,8 @@
 
 
 def reconstruct_func(
-    func: Optional[AggFuncType], **kwargs
-) -> Tuple[bool, Optional[AggFuncType], Optional[List[str]], Optional[List[int]]]:
+    func: AggFuncType | None, **kwargs
+) -> tuple[bool, AggFuncType | None, list[str] | None, list[int] | None]:
     """
     This is the internal function to reconstruct func given if there is relabeling
     or not and also normalize the keyword to get new order of columns.
@@ -81,8 +76,8 @@ def reconstruct_func(
     (False, 'min', None, None)
     """
     relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
-    columns: Optional[List[str]] = None
-    order: Optional[List[int]] = None
+    columns: list[str] | None = None
+    order: list[int] | None = None
 
     if not relabeling:
         if isinstance(func, list) and len(func) > len(set(func)):
@@ -129,7 +124,7 @@ def is_multi_agg_with_relabel(**kwargs) -> bool:
     )
 
 
-def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[int]]:
+def normalize_keyword_aggregation(kwargs: dict) -> tuple[dict, list[str], list[int]]:
     """
     Normalize user-provided "named aggregation" kwargs.
     Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
@@ -187,8 +182,8 @@ def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[i
 
 
 def _make_unique_kwarg_list(
-    seq: Sequence[Tuple[Any, Any]]
-) -> Sequence[Tuple[Any, Any]]:
+    seq: Sequence[tuple[Any, Any]]
+) -> Sequence[tuple[Any, Any]]:
     """
     Uniquify aggfunc name of the pairs in the order list
@@ -292,10 +287,10 @@ def maybe_mangle_lambdas(agg_spec: Any) -> Any:
 
 def relabel_result(
     result: FrameOrSeries,
-    func: Dict[str, List[Union[Callable, str]]],
+    func: dict[str, list[Callable | str]],
     columns: Iterable[Hashable],
     order: Iterable[int],
-) -> Dict[Hashable, Series]:
+) -> dict[Hashable, Series]:
     """
     Internal function to reorder result if relabelling is True for
     dataframe.agg, and return the reordered result in dict.
@@ -322,7 +317,7 @@ def relabel_result(
     reordered_indexes = [
         pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
     ]
-    reordered_result_in_dict: Dict[Hashable, Series] = {}
+    reordered_result_in_dict: dict[Hashable, Series] = {}
     idx = 0
 
     reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
@@ -366,7 +361,7 @@ def relabel_result(
 
 def validate_func_kwargs(
     kwargs: dict,
-) -> Tuple[List[str], List[Union[str, Callable[..., Any]]]]:
+) -> tuple[list[str], list[str | Callable[..., Any]]]:
     """
     Validates types of user-provided "named aggregation" kwargs.
     `TypeError` is raised if aggfunc is not `str` or callable.
27 changes: 12 additions & 15 deletions pandas/core/algorithms.py
@@ -8,9 +8,6 @@
 from textwrap import dedent
 from typing import (
     TYPE_CHECKING,
-    Dict,
-    Optional,
-    Tuple,
     Union,
     cast,
 )
@@ -103,13 +100,13 @@
         TimedeltaArray,
     )
 
-_shared_docs: Dict[str, str] = {}
+_shared_docs: dict[str, str] = {}
 
 
 # --------------- #
 # dtype access #
 # --------------- #
-def _ensure_data(values: ArrayLike) -> Tuple[np.ndarray, DtypeObj]:
+def _ensure_data(values: ArrayLike) -> tuple[np.ndarray, DtypeObj]:
     """
     routine to ensure that our data is of the correct
     input dtype for lower-level routines
@@ -542,10 +539,10 @@ def f(c, v):
 def factorize_array(
     values: np.ndarray,
     na_sentinel: int = -1,
-    size_hint: Optional[int] = None,
+    size_hint: int | None = None,
     na_value=None,
-    mask: Optional[np.ndarray] = None,
-) -> Tuple[np.ndarray, np.ndarray]:
+    mask: np.ndarray | None = None,
+) -> tuple[np.ndarray, np.ndarray]:
     """
     Factorize an array-like to codes and uniques.
@@ -608,9 +605,9 @@ def factorize_array(
 def factorize(
     values,
     sort: bool = False,
-    na_sentinel: Optional[int] = -1,
-    size_hint: Optional[int] = None,
-) -> Tuple[np.ndarray, Union[np.ndarray, Index]]:
+    na_sentinel: int | None = -1,
+    size_hint: int | None = None,
+) -> tuple[np.ndarray, np.ndarray | Index]:
     """
     Encode the object as an enumerated type or categorical variable.
@@ -926,7 +923,7 @@ def value_counts_arraylike(values, dropna: bool):
     return keys, counts
 
 
-def duplicated(values: ArrayLike, keep: Union[str, bool] = "first") -> np.ndarray:
+def duplicated(values: ArrayLike, keep: str | bool = "first") -> np.ndarray:
     """
     Return boolean ndarray denoting duplicate values.
@@ -1062,8 +1059,8 @@ def rank(
 def checked_add_with_arr(
     arr: np.ndarray,
     b,
-    arr_mask: Optional[np.ndarray] = None,
-    b_mask: Optional[np.ndarray] = None,
+    arr_mask: np.ndarray | None = None,
+    b_mask: np.ndarray | None = None,
 ) -> np.ndarray:
     """
     Perform array addition that checks for underflow and overflow.
@@ -1741,7 +1738,7 @@ def safe_sort(
     na_sentinel: int = -1,
     assume_unique: bool = False,
     verify: bool = True,
-) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
+) -> np.ndarray | tuple[np.ndarray, np.ndarray]:
     """
     Sort ``values`` and reorder corresponding ``codes``.
28 changes: 12 additions & 16 deletions pandas/core/apply.py
@@ -9,10 +9,6 @@
     Hashable,
     Iterator,
     List,
-    Optional,
-    Tuple,
-    Type,
-    Union,
     cast,
 )
 import warnings
@@ -77,13 +73,13 @@ def frame_apply(
     func: AggFuncType,
     axis: Axis = 0,
     raw: bool = False,
-    result_type: Optional[str] = None,
+    result_type: str | None = None,
     args=None,
     kwargs=None,
 ) -> FrameApply:
     """ construct and return a row or column based frame apply object """
     axis = obj._get_axis_number(axis)
-    klass: Type[FrameApply]
+    klass: type[FrameApply]
     if axis == 0:
         klass = FrameRowApply
     elif axis == 1:
@@ -107,7 +103,7 @@ def __init__(
         obj: AggObjType,
         func,
         raw: bool,
-        result_type: Optional[str],
+        result_type: str | None,
         args,
         kwargs,
     ):
@@ -152,7 +148,7 @@ def agg_axis(self) -> Index:
     def apply(self) -> FrameOrSeriesUnion:
         pass
 
-    def agg(self) -> Optional[FrameOrSeriesUnion]:
+    def agg(self) -> FrameOrSeriesUnion | None:
         """
         Provide an implementation for the aggregators.
 
@@ -265,7 +261,7 @@ def transform_dict_like(self, func):
 
         func = self.normalize_dictlike_arg("transform", obj, func)
 
-        results: Dict[Hashable, FrameOrSeriesUnion] = {}
+        results: dict[Hashable, FrameOrSeriesUnion] = {}
         failed_names = []
         all_type_errors = True
         for name, how in func.items():
@@ -459,7 +455,7 @@ def agg_dict_like(self) -> FrameOrSeriesUnion:
 
         return result
 
-    def maybe_apply_str(self) -> Optional[FrameOrSeriesUnion]:
+    def maybe_apply_str(self) -> FrameOrSeriesUnion | None:
         """
         Compute apply in case of a string.
 
@@ -492,7 +488,7 @@ def maybe_apply_str(self) -> Optional[FrameOrSeriesUnion]:
             raise ValueError(f"Operation {f} does not support axis=1")
         return obj._try_aggregate_string_function(f, *self.args, **self.kwargs)
 
-    def maybe_apply_multiple(self) -> Optional[FrameOrSeriesUnion]:
+    def maybe_apply_multiple(self) -> FrameOrSeriesUnion | None:
         """
         Compute apply in case of a list-like or dict-like.
 
@@ -754,7 +750,7 @@ def apply_standard(self):
         # wrap results
         return self.wrap_results(results, res_index)
 
-    def apply_series_generator(self) -> Tuple[ResType, Index]:
+    def apply_series_generator(self) -> tuple[ResType, Index]:
         assert callable(self.f)
 
         series_gen = self.series_generator
@@ -1039,11 +1035,11 @@ def apply_standard(self) -> FrameOrSeriesUnion:
 
 
 class GroupByApply(Apply):
-    obj: Union[SeriesGroupBy, DataFrameGroupBy]
+    obj: SeriesGroupBy | DataFrameGroupBy
 
     def __init__(
         self,
-        obj: Union[SeriesGroupBy, DataFrameGroupBy],
+        obj: SeriesGroupBy | DataFrameGroupBy,
         func: AggFuncType,
         args,
         kwargs,
@@ -1068,11 +1064,11 @@ def transform(self):
 
 class ResamplerWindowApply(Apply):
     axis = 0
-    obj: Union[Resampler, BaseWindow]
+    obj: Resampler | BaseWindow
 
     def __init__(
         self,
-        obj: Union[Resampler, BaseWindow],
+        obj: Resampler | BaseWindow,
         func: AggFuncType,
         args,
         kwargs,
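One runtime caveat of this migration, worth noting though not raised in the PR itself: PEP 563 stores annotations as strings, so any tool that resolves them at runtime, such as typing.get_type_hints, re-evaluates the new syntax and fails on interpreters that lack it (PEP 585 needs 3.9, PEP 604 needs 3.10). A short sketch (illustrative only, not pandas code):

from __future__ import annotations

import typing


def f(x: list[str] | None) -> int:
    return 0 if x is None else len(x)


# With deferred evaluation the annotations are stored as plain strings:
print(f.__annotations__)  # {'x': 'list[str] | None', 'return': 'int'}

# Resolving them re-executes the syntax: the call below works on 3.10+,
# but raises TypeError on 3.8, where list is not subscriptable.
# typing.get_type_hints(f)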