STY: remove --keep-runtime-typing from pyupgrade Part-4 #40804

Merged: 1 commit, Apr 6, 2021
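For context: pyupgrade's `--keep-runtime-typing` flag prevents it from rewriting `typing` spellings (`Optional[X]`, `Union[X, Y]`, `List[str]`, ...) into the PEP 604/585 forms. Dropping the flag lets modules that begin with `from __future__ import annotations` use the modern syntax, which is safe there because PEP 563 leaves annotations unevaluated. A minimal sketch of the rewrite (illustrative function, not pandas code):

from __future__ import annotations  # must be the first statement in the module

# pyupgrade without --keep-runtime-typing rewrites the old spellings, e.g.
#   List[str] -> list[str], Optional[str] -> str | None, Set[str] -> set[str]
# PEP 563 stores these annotations as strings, so they never execute on 3.7/3.8.
def pick(items: list[str], default: str | None = None) -> set[str]:
    """Return the unique items, adding the default when one is given."""
    unique = set(items)
    if default is not None:
        unique.add(default)
    return unique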
4 changes: 2 additions & 2 deletions pandas/_typing.py
@@ -25,7 +25,7 @@
     Optional,
     Sequence,
     Tuple,
-    Type,
+    Type as type_t,
     TypeVar,
     Union,
 )
@@ -119,7 +119,7 @@
 # dtypes
 NpDtype = Union[str, np.dtype]
 Dtype = Union[
-    "ExtensionDtype", NpDtype, Type[Union[str, float, int, complex, bool, object]]
+    "ExtensionDtype", NpDtype, type_t[Union[str, float, int, complex, bool, object]]
 ]
 # DtypeArg specifies all allowable dtypes in a functions its dtype argument
 DtypeArg = Union[Dtype, Dict[Hashable, Dtype]]
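The `Type as type_t` rename is the key detail here: `Dtype` is a module-level assignment, evaluated at import time, so `from __future__ import annotations` cannot defer it, and the builtin `type[...]` (subscriptable only on Python 3.9+) would break there. Aliasing the import keeps pyupgrade from rewriting `Type[...]` to `type[...]` in that expression. A sketch of the distinction, with illustrative names:

from __future__ import annotations  # defers annotations only (PEP 563)

from typing import Type as type_t, Union

# Evaluated at import time: must stay typing.Type on Python < 3.9; the
# type_t alias hides it from pyupgrade's Type[...] -> type[...] rewrite.
NumberType = Union[type_t[int], type_t[float]]

# Never evaluated at runtime thanks to the future import, so the builtin
# generic and the PEP 604 pipe are safe even on Python 3.7/3.8.
def describe(cls: type[int] | type[float]) -> str:
    return cls.__name__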
12 changes: 4 additions & 8 deletions pandas/core/ops/__init__.py
@@ -6,11 +6,7 @@
 from __future__ import annotations

 import operator
-from typing import (
-    TYPE_CHECKING,
-    Optional,
-    Set,
-)
+from typing import TYPE_CHECKING
 import warnings

 import numpy as np
@@ -79,7 +75,7 @@

 # -----------------------------------------------------------------------------
 # constants
-ARITHMETIC_BINOPS: Set[str] = {
+ARITHMETIC_BINOPS: set[str] = {
     "add",
     "sub",
     "mul",
@@ -99,7 +95,7 @@
 }


-COMPARISON_BINOPS: Set[str] = {"eq", "ne", "lt", "gt", "le", "ge"}
+COMPARISON_BINOPS: set[str] = {"eq", "ne", "lt", "gt", "le", "ge"}


 # -----------------------------------------------------------------------------
@@ -207,7 +203,7 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0):


 def align_method_FRAME(
-    left, right, axis, flex: Optional[bool] = False, level: Level = None
+    left, right, axis, flex: bool | None = False, level: Level = None
 ):
     """
     Convert rhs to meet lhs dims if input is list, tuple or np.ndarray.
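Note that `ARITHMETIC_BINOPS: set[str]` is a bare variable annotation, and PEP 563 postpones those as well: the annotation is stored as a string and never evaluated, so the builtin generic cannot raise on Python 3.7/3.8. A standalone check (illustrative, not pandas code):

from __future__ import annotations

ARITHMETIC_BINOPS: set[str] = {"add", "sub", "mul"}

# The annotation is kept as the literal string "set[str]" in __annotations__
# rather than being evaluated, which is why this works before Python 3.9.
print(__annotations__["ARITHMETIC_BINOPS"])  # set[str]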
20 changes: 8 additions & 12 deletions pandas/core/reshape/concat.py
@@ -8,11 +8,7 @@
     TYPE_CHECKING,
     Hashable,
     Iterable,
-    List,
     Mapping,
-    Optional,
-    Type,
-    Union,
     cast,
     overload,
 )
@@ -58,7 +54,7 @@

 @overload
 def concat(
-    objs: Union[Iterable[DataFrame], Mapping[Hashable, DataFrame]],
+    objs: Iterable[DataFrame] | Mapping[Hashable, DataFrame],
     axis=0,
     join: str = "outer",
     ignore_index: bool = False,
@@ -74,7 +70,7 @@ def concat(

 @overload
 def concat(
-    objs: Union[Iterable[NDFrame], Mapping[Hashable, NDFrame]],
+    objs: Iterable[NDFrame] | Mapping[Hashable, NDFrame],
     axis=0,
     join: str = "outer",
     ignore_index: bool = False,
@@ -89,7 +85,7 @@ def concat(


 def concat(
-    objs: Union[Iterable[NDFrame], Mapping[Hashable, NDFrame]],
+    objs: Iterable[NDFrame] | Mapping[Hashable, NDFrame],
     axis=0,
     join="outer",
     ignore_index: bool = False,
@@ -314,7 +310,7 @@ class _Concatenator:

     def __init__(
         self,
-        objs: Union[Iterable[NDFrame], Mapping[Hashable, NDFrame]],
+        objs: Iterable[NDFrame] | Mapping[Hashable, NDFrame],
         axis=0,
         join: str = "outer",
         keys=None,
@@ -383,7 +379,7 @@ def __init__(
         # get the sample
         # want the highest ndim that we have, and must be non-empty
         # unless all objs are empty
-        sample: Optional[NDFrame] = None
+        sample: NDFrame | None = None
         if len(ndims) > 1:
             max_ndim = max(ndims)
             for obj in objs:
@@ -474,7 +470,7 @@ def __init__(
         self.new_axes = self._get_new_axes()

     def get_result(self):
-        cons: Type[FrameOrSeriesUnion]
+        cons: type[FrameOrSeriesUnion]
         sample: FrameOrSeriesUnion

@@ -539,7 +535,7 @@ def _get_result_dim(self) -> int:
         else:
             return self.objs[0].ndim

-    def _get_new_axes(self) -> List[Index]:
+    def _get_new_axes(self) -> list[Index]:
         ndim = self._get_result_dim()
         return [
             self._get_concat_axis if i == self.bm_axis else self._get_comb_axis(i)
@@ -568,7 +564,7 @@ def _get_concat_axis(self) -> Index:
             idx = ibase.default_index(len(self.objs))
             return idx
         elif self.keys is None:
-            names: List[Hashable] = [None] * len(self.objs)
+            names: list[Hashable] = [None] * len(self.objs)
             num = 0
             has_names = False
             for i, x in enumerate(self.objs):
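The `objs` parameter above uses the PEP 604 pipe in a parameter annotation; under postponed evaluation the union expression is never executed, so it parses on Python 3.7/3.8 even though `X | Y` between types only evaluates natively from 3.10. A self-contained sketch with toy names (not the pandas API):

from __future__ import annotations

from collections.abc import Iterable, Mapping
from typing import Hashable

# "Iterable[int] | Mapping[Hashable, int]" stays a string under PEP 563,
# so the pipe union is safe in this signature on Python 3.7+.
def total(objs: Iterable[int] | Mapping[Hashable, int]) -> int:
    values = objs.values() if isinstance(objs, Mapping) else objs
    return sum(values)

Both total([1, 2, 3]) and total({"a": 1, "b": 2}) work, since Mapping inputs contribute their values.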
3 changes: 1 addition & 2 deletions pandas/core/reshape/melt.py
@@ -3,7 +3,6 @@
 import re
 from typing import (
     TYPE_CHECKING,
-    List,
     cast,
 )
 import warnings
@@ -494,7 +493,7 @@ def wide_to_long(
     two  2.9
     """

-    def get_var_names(df, stub: str, sep: str, suffix: str) -> List[str]:
+    def get_var_names(df, stub: str, sep: str, suffix: str) -> list[str]:
         regex = fr"^{re.escape(stub)}{re.escape(sep)}{suffix}$"
         pattern = re.compile(regex)
         return [col for col in df.columns if pattern.match(col)]
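For readers unfamiliar with the helper being retyped above, here is a toy version of its column matching (the stub/sep/suffix values are illustrative, not taken from pandas):

import re

# get_var_names builds ^<stub><sep><suffix>$ with the stub and separator
# escaped; with suffix r"\d+" it matches wide columns like "ht_1", "ht_2".
stub, sep, suffix = "ht", "_", r"\d+"
pattern = re.compile(fr"^{re.escape(stub)}{re.escape(sep)}{suffix}$")
columns = ["famid", "birth", "ht_1", "ht_2", "height"]
print([col for col in columns if pattern.match(col)])  # ['ht_1', 'ht_2']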
67 changes: 32 additions & 35 deletions pandas/core/reshape/merge.py
@@ -11,9 +11,6 @@
 from typing import (
     TYPE_CHECKING,
     Hashable,
-    List,
-    Optional,
-    Tuple,
     cast,
 )
 import warnings
@@ -94,16 +91,16 @@ def merge(
     left: FrameOrSeriesUnion,
     right: FrameOrSeriesUnion,
     how: str = "inner",
-    on: Optional[IndexLabel] = None,
-    left_on: Optional[IndexLabel] = None,
-    right_on: Optional[IndexLabel] = None,
+    on: IndexLabel | None = None,
+    left_on: IndexLabel | None = None,
+    right_on: IndexLabel | None = None,
     left_index: bool = False,
     right_index: bool = False,
     sort: bool = False,
     suffixes: Suffixes = ("_x", "_y"),
     copy: bool = True,
     indicator: bool = False,
-    validate: Optional[str] = None,
+    validate: str | None = None,
 ) -> DataFrame:
     op = _MergeOperation(
         left,
@@ -143,7 +140,7 @@ def _groupby_and_merge(by, left: DataFrame, right: DataFrame, merge_pieces):
         by = [by]

     lby = left.groupby(by, sort=False)
-    rby: Optional[groupby.DataFrameGroupBy] = None
+    rby: groupby.DataFrameGroupBy | None = None

     # if we can groupby the rhs
     # then we can get vastly better perf
@@ -186,12 +183,12 @@ def _groupby_and_merge(by, left: DataFrame, right: DataFrame, merge_pieces):
 def merge_ordered(
     left: DataFrame,
     right: DataFrame,
-    on: Optional[IndexLabel] = None,
-    left_on: Optional[IndexLabel] = None,
-    right_on: Optional[IndexLabel] = None,
+    on: IndexLabel | None = None,
+    left_on: IndexLabel | None = None,
+    right_on: IndexLabel | None = None,
     left_by=None,
     right_by=None,
-    fill_method: Optional[str] = None,
+    fill_method: str | None = None,
     suffixes: Suffixes = ("_x", "_y"),
     how: str = "outer",
 ) -> DataFrame:
@@ -327,9 +324,9 @@ def _merger(x, y) -> DataFrame:
 def merge_asof(
     left: DataFrame,
     right: DataFrame,
-    on: Optional[IndexLabel] = None,
-    left_on: Optional[IndexLabel] = None,
-    right_on: Optional[IndexLabel] = None,
+    on: IndexLabel | None = None,
+    left_on: IndexLabel | None = None,
+    right_on: IndexLabel | None = None,
     left_index: bool = False,
     right_index: bool = False,
     by=None,
@@ -614,17 +611,17 @@ def __init__(
         left: FrameOrSeriesUnion,
         right: FrameOrSeriesUnion,
         how: str = "inner",
-        on: Optional[IndexLabel] = None,
-        left_on: Optional[IndexLabel] = None,
-        right_on: Optional[IndexLabel] = None,
+        on: IndexLabel | None = None,
+        left_on: IndexLabel | None = None,
+        right_on: IndexLabel | None = None,
         axis: int = 1,
         left_index: bool = False,
         right_index: bool = False,
         sort: bool = True,
         suffixes: Suffixes = ("_x", "_y"),
         copy: bool = True,
         indicator: bool = False,
-        validate: Optional[str] = None,
+        validate: str | None = None,
     ):
         _left = _validate_operand(left)
         _right = _validate_operand(right)
@@ -650,7 +647,7 @@ def __init__(

         self.indicator = indicator

-        self.indicator_name: Optional[str]
+        self.indicator_name: str | None
         if isinstance(self.indicator, str):
             self.indicator_name = self.indicator
         elif isinstance(self.indicator, bool):
@@ -743,14 +740,14 @@ def get_result(self) -> DataFrame:
         return result.__finalize__(self, method="merge")

     def _maybe_drop_cross_column(
-        self, result: DataFrame, cross_col: Optional[str]
+        self, result: DataFrame, cross_col: str | None
     ) -> None:
         if cross_col is not None:
             result.drop(columns=cross_col, inplace=True)

     def _indicator_pre_merge(
         self, left: DataFrame, right: DataFrame
-    ) -> Tuple[DataFrame, DataFrame]:
+    ) -> tuple[DataFrame, DataFrame]:

         columns = left.columns.union(right.columns)

@@ -830,8 +827,8 @@ def _maybe_restore_index_levels(self, result: DataFrame) -> None:
     def _maybe_add_join_keys(
         self,
         result: DataFrame,
-        left_indexer: Optional[np.ndarray],
-        right_indexer: Optional[np.ndarray],
+        left_indexer: np.ndarray | None,
+        right_indexer: np.ndarray | None,
     ) -> None:

         left_has_missing = None
@@ -1274,7 +1271,7 @@ def _maybe_coerce_merge_keys(self) -> None:

     def _create_cross_configuration(
         self, left: DataFrame, right: DataFrame
-    ) -> Tuple[DataFrame, DataFrame, str, str]:
+    ) -> tuple[DataFrame, DataFrame, str, str]:
         """
         Creates the configuration to dispatch the cross operation to inner join,
         e.g. adding a join column and resetting parameters. Join column is added
@@ -1498,7 +1495,7 @@ def restore_dropped_levels_multijoin(
     join_index: Index,
     lindexer: np.ndarray,
     rindexer: np.ndarray,
-) -> Tuple[List[Index], np.ndarray, List[Hashable]]:
+) -> tuple[list[Index], np.ndarray, list[Hashable]]:
     """
     *this is an internal non-public method*

@@ -1592,15 +1589,15 @@ def __init__(
         self,
         left: DataFrame,
         right: DataFrame,
-        on: Optional[IndexLabel] = None,
-        left_on: Optional[IndexLabel] = None,
-        right_on: Optional[IndexLabel] = None,
+        on: IndexLabel | None = None,
+        left_on: IndexLabel | None = None,
+        right_on: IndexLabel | None = None,
         left_index: bool = False,
         right_index: bool = False,
         axis: int = 1,
         suffixes: Suffixes = ("_x", "_y"),
         copy: bool = True,
-        fill_method: Optional[str] = None,
+        fill_method: str | None = None,
         how: str = "outer",
     ):

@@ -1686,9 +1683,9 @@ def __init__(
         self,
         left: DataFrame,
         right: DataFrame,
-        on: Optional[IndexLabel] = None,
-        left_on: Optional[IndexLabel] = None,
-        right_on: Optional[IndexLabel] = None,
+        on: IndexLabel | None = None,
+        left_on: IndexLabel | None = None,
+        right_on: IndexLabel | None = None,
         left_index: bool = False,
         right_index: bool = False,
         by=None,
@@ -1697,7 +1694,7 @@
         axis: int = 1,
         suffixes: Suffixes = ("_x", "_y"),
         copy: bool = True,
-        fill_method: Optional[str] = None,
+        fill_method: str | None = None,
         how: str = "asof",
         tolerance=None,
         allow_exact_matches: bool = True,
@@ -2031,7 +2028,7 @@ def _left_join_on_index(

 def _factorize_keys(
     lk: ArrayLike, rk: ArrayLike, sort: bool = True, how: str = "inner"
-) -> Tuple[np.ndarray, np.ndarray, int]:
+) -> tuple[np.ndarray, np.ndarray, int]:
     """
     Encode left and right keys as enumerated types.

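All of the merge.py changes are the same two rewrites (Optional[X] -> X | None, Tuple/List -> tuple/list) applied to annotations, which PEP 563 keeps unevaluated. As one self-contained illustration in the spirit of the _factorize_keys signature above (a toy, not the pandas implementation):

from __future__ import annotations

import numpy as np

# Returns (codes, uniques, count), annotated with the builtin tuple generic;
# the annotation is never evaluated at runtime under the future import.
def factorize_toy(values: np.ndarray) -> tuple[np.ndarray, np.ndarray, int]:
    uniques, codes = np.unique(values, return_inverse=True)
    return codes, uniques, len(uniques)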