TYP: use from __future__ import annotations more - batch 4 #41896

Merged
merged 8 commits on Jun 9, 2021
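
For context, the pattern this batch applies across modules: adding `from __future__ import annotations` (PEP 563) makes the interpreter store annotations as strings instead of evaluating them at definition time, so the PEP 604 union syntax (`str | None`) and PEP 585 builtin generics (`dict[str, Any]`) can replace `typing.Optional`, `Union`, and `Dict` even on the Python 3.7/3.8 versions pandas still supported. A minimal sketch — the function and names below are illustrative, not taken from this diff:

```python
from __future__ import annotations  # future imports must precede all other statements

from typing import Any


# With the future import active, the annotations below are never evaluated
# at runtime, so the 3.9+/3.10+ syntax is safe on Python 3.7+.
def lookup(table: dict[str, Any], key: str | None = None) -> Any:
    """Hypothetical helper mirroring the Optional[...] -> ... | None rewrites."""
    if key is None:
        return None
    return table.get(key)


# Annotations are stored as plain strings under the future import:
print(lookup.__annotations__["key"])  # -> "str | None"
```

Note that the future import only defers expressions inside annotations; a runtime expression such as `isinstance(x, int | None)` would still require Python 3.10, which is why the new syntax is used only in annotation positions here.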
39 changes: 18 additions & 21 deletions pandas/compat/numpy/function.py
@@ -15,12 +15,9 @@
methods that are spread throughout the codebase. This module will make it
easier to adjust to future upstream changes in the analogous numpy signatures.
"""
from typing import (
Any,
Dict,
Optional,
Union,
)
from __future__ import annotations

from typing import Any

from numpy import ndarray

@@ -41,7 +38,7 @@ def __init__(
self,
defaults,
fname=None,
method: Optional[str] = None,
method: str | None = None,
max_fname_arg_count=None,
):
self.fname = fname
@@ -55,7 +52,7 @@ def __call__(
kwargs,
fname=None,
max_fname_arg_count=None,
method: Optional[str] = None,
method: str | None = None,
) -> None:
if args or kwargs:
fname = self.fname if fname is None else fname
@@ -119,7 +116,7 @@ def validate_argmax_with_skipna(skipna, args, kwargs):
return skipna


ARGSORT_DEFAULTS: Dict[str, Optional[Union[int, str]]] = {}
ARGSORT_DEFAULTS: dict[str, int | str | None] = {}
ARGSORT_DEFAULTS["axis"] = -1
ARGSORT_DEFAULTS["kind"] = "quicksort"
ARGSORT_DEFAULTS["order"] = None
@@ -132,7 +129,7 @@ def validate_argmax_with_skipna(skipna, args, kwargs):

# two different signatures of argsort, this second validation for when the
# `kind` param is supported
ARGSORT_DEFAULTS_KIND: Dict[str, Optional[int]] = {}
ARGSORT_DEFAULTS_KIND: dict[str, int | None] = {}
ARGSORT_DEFAULTS_KIND["axis"] = -1
ARGSORT_DEFAULTS_KIND["order"] = None
validate_argsort_kind = CompatValidator(
@@ -155,7 +152,7 @@ def validate_argsort_with_ascending(ascending, args, kwargs):
return ascending


CLIP_DEFAULTS: Dict[str, Any] = {"out": None}
CLIP_DEFAULTS: dict[str, Any] = {"out": None}
validate_clip = CompatValidator(
CLIP_DEFAULTS, fname="clip", method="both", max_fname_arg_count=3
)
@@ -176,7 +173,7 @@ def validate_clip_with_axis(axis, args, kwargs):
return axis


CUM_FUNC_DEFAULTS: Dict[str, Any] = {}
CUM_FUNC_DEFAULTS: dict[str, Any] = {}
CUM_FUNC_DEFAULTS["dtype"] = None
CUM_FUNC_DEFAULTS["out"] = None
validate_cum_func = CompatValidator(
@@ -201,7 +198,7 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name):
return skipna


ALLANY_DEFAULTS: Dict[str, Optional[bool]] = {}
ALLANY_DEFAULTS: dict[str, bool | None] = {}
ALLANY_DEFAULTS["dtype"] = None
ALLANY_DEFAULTS["out"] = None
ALLANY_DEFAULTS["keepdims"] = False
@@ -224,28 +221,28 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name):
MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1
)

RESHAPE_DEFAULTS: Dict[str, str] = {"order": "C"}
RESHAPE_DEFAULTS: dict[str, str] = {"order": "C"}
validate_reshape = CompatValidator(
RESHAPE_DEFAULTS, fname="reshape", method="both", max_fname_arg_count=1
)

REPEAT_DEFAULTS: Dict[str, Any] = {"axis": None}
REPEAT_DEFAULTS: dict[str, Any] = {"axis": None}
validate_repeat = CompatValidator(
REPEAT_DEFAULTS, fname="repeat", method="both", max_fname_arg_count=1
)

ROUND_DEFAULTS: Dict[str, Any] = {"out": None}
ROUND_DEFAULTS: dict[str, Any] = {"out": None}
validate_round = CompatValidator(
ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1
)

SORT_DEFAULTS: Dict[str, Optional[Union[int, str]]] = {}
SORT_DEFAULTS: dict[str, int | str | None] = {}
SORT_DEFAULTS["axis"] = -1
SORT_DEFAULTS["kind"] = "quicksort"
SORT_DEFAULTS["order"] = None
validate_sort = CompatValidator(SORT_DEFAULTS, fname="sort", method="kwargs")

STAT_FUNC_DEFAULTS: Dict[str, Optional[Any]] = {}
STAT_FUNC_DEFAULTS: dict[str, Any | None] = {}
STAT_FUNC_DEFAULTS["dtype"] = None
STAT_FUNC_DEFAULTS["out"] = None

@@ -279,13 +276,13 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name):
MEDIAN_DEFAULTS, fname="median", method="both", max_fname_arg_count=1
)

STAT_DDOF_FUNC_DEFAULTS: Dict[str, Optional[bool]] = {}
STAT_DDOF_FUNC_DEFAULTS: dict[str, bool | None] = {}
STAT_DDOF_FUNC_DEFAULTS["dtype"] = None
STAT_DDOF_FUNC_DEFAULTS["out"] = None
STAT_DDOF_FUNC_DEFAULTS["keepdims"] = False
validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS, method="kwargs")

TAKE_DEFAULTS: Dict[str, Optional[str]] = {}
TAKE_DEFAULTS: dict[str, str | None] = {}
TAKE_DEFAULTS["out"] = None
TAKE_DEFAULTS["mode"] = "raise"
validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs")
@@ -392,7 +389,7 @@ def validate_resampler_func(method: str, args, kwargs) -> None:
raise TypeError("too many arguments passed in")


def validate_minmax_axis(axis: Optional[int], ndim: int = 1) -> None:
def validate_minmax_axis(axis: int | None, ndim: int = 1) -> None:
"""
Ensure that the axis argument passed to min, max, argmin, or argmax is zero
or None, as otherwise it will be incorrectly ignored.
12 changes: 6 additions & 6 deletions pandas/core/array_algos/replace.py
@@ -1,13 +1,13 @@
"""
Methods used by Block.replace and related methods.
"""
from __future__ import annotations

import operator
import re
from typing import (
Any,
Optional,
Pattern,
Union,
)

import numpy as np
@@ -42,8 +42,8 @@ def should_use_regex(regex: bool, to_replace: Any) -> bool:


def compare_or_regex_search(
a: ArrayLike, b: Union[Scalar, Pattern], regex: bool, mask: np.ndarray
) -> Union[ArrayLike, bool]:
a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: np.ndarray
) -> ArrayLike | bool:
"""
Compare two array_like inputs of the same shape or two scalar values

@@ -65,7 +65,7 @@ def compare_or_regex_search(
return ~mask

def _check_comparison_types(
result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern]
result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern
):
"""
Raises an error if the two arrays (a,b) cannot be compared.
@@ -115,7 +115,7 @@ def _check_comparison_types(
return result


def replace_regex(values: ArrayLike, rx: re.Pattern, value, mask: Optional[np.ndarray]):
def replace_regex(values: ArrayLike, rx: re.Pattern, value, mask: np.ndarray | None):
"""
Parameters
----------
21 changes: 9 additions & 12 deletions pandas/core/computation/expr.py
@@ -1,6 +1,7 @@
"""
:func:`~pandas.eval` parsers.
"""
from __future__ import annotations

import ast
from functools import (
@@ -11,10 +12,6 @@
import tokenize
from typing import (
Callable,
Optional,
Set,
Tuple,
Type,
TypeVar,
)

@@ -50,7 +47,7 @@
import pandas.io.formats.printing as printing


def _rewrite_assign(tok: Tuple[int, str]) -> Tuple[int, str]:
def _rewrite_assign(tok: tuple[int, str]) -> tuple[int, str]:
"""
Rewrite the assignment operator for PyTables expressions that use ``=``
as a substitute for ``==``.
@@ -69,7 +66,7 @@ def _rewrite_assign(tok: Tuple[int, str]) -> Tuple[int, str]:
return toknum, "==" if tokval == "=" else tokval


def _replace_booleans(tok: Tuple[int, str]) -> Tuple[int, str]:
def _replace_booleans(tok: tuple[int, str]) -> tuple[int, str]:
"""
Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
precedence is changed to boolean precedence.
@@ -94,7 +91,7 @@ def _replace_booleans(tok: Tuple[int, str]) -> Tuple[int, str]:
return toknum, tokval


def _replace_locals(tok: Tuple[int, str]) -> Tuple[int, str]:
def _replace_locals(tok: tuple[int, str]) -> tuple[int, str]:
"""
Replace local variables with a syntactically valid name.

@@ -271,7 +268,7 @@ def f(self, *args, **kwargs):
_T = TypeVar("_T", bound="BaseExprVisitor")


def disallow(nodes: Set[str]) -> Callable[[Type[_T]], Type[_T]]:
def disallow(nodes: set[str]) -> Callable[[type[_T]], type[_T]]:
"""
Decorator to disallow certain nodes from parsing. Raises a
NotImplementedError instead.
@@ -281,7 +278,7 @@ def disallow(nodes: Set[str]) -> Callable[[Type[_T]], Type[_T]]:
callable
"""

def disallowed(cls: Type[_T]) -> Type[_T]:
def disallowed(cls: type[_T]) -> type[_T]:
cls.unsupported_nodes = ()
for node in nodes:
new_method = _node_not_implemented(node)
@@ -352,7 +349,7 @@ class BaseExprVisitor(ast.NodeVisitor):
preparser : callable
"""

const_type: Type[Term] = Constant
const_type: type[Term] = Constant
term_type = Term

binary_ops = CMP_OPS_SYMS + BOOL_OPS_SYMS + ARITH_OPS_SYMS
@@ -390,7 +387,7 @@ class BaseExprVisitor(ast.NodeVisitor):
ast.NotIn: ast.NotIn,
}

unsupported_nodes: Tuple[str, ...]
unsupported_nodes: tuple[str, ...]

def __init__(self, env, engine, parser, preparser=_preparse):
self.env = env
@@ -798,7 +795,7 @@ def __init__(
expr,
engine: str = "numexpr",
parser: str = "pandas",
env: Optional[Scope] = None,
env: Scope | None = None,
level: int = 0,
):
self.expr = expr
19 changes: 8 additions & 11 deletions pandas/core/computation/expressions.py
@@ -5,12 +5,9 @@
Offer fast expression evaluation through numexpr

"""
from __future__ import annotations

import operator
from typing import (
List,
Optional,
Set,
)
import warnings

import numpy as np
@@ -25,11 +22,11 @@
if NUMEXPR_INSTALLED:
import numexpr as ne

_TEST_MODE: Optional[bool] = None
_TEST_RESULT: List[bool] = []
_TEST_MODE: bool | None = None
_TEST_RESULT: list[bool] = []
USE_NUMEXPR = NUMEXPR_INSTALLED
_evaluate: Optional[FuncType] = None
_where: Optional[FuncType] = None
_evaluate: FuncType | None = None
_where: FuncType | None = None

# the set of dtypes that we will allow pass to numexpr
_ALLOWED_DTYPES = {
@@ -79,7 +76,7 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check):
# required min elements (otherwise we are adding overhead)
if a.size > _MIN_ELEMENTS:
# check for dtype compatibility
dtypes: Set[str] = set()
dtypes: set[str] = set()
for o in [a, b]:
# ndarray and Series Case
if hasattr(o, "dtype"):
@@ -277,7 +274,7 @@ def _store_test_result(used_numexpr: bool) -> None:
_TEST_RESULT.append(used_numexpr)


def get_test_result() -> List[bool]:
def get_test_result() -> list[bool]:
"""
Get test result and reset test_results.
"""
6 changes: 3 additions & 3 deletions pandas/core/dtypes/common.py
@@ -1,11 +1,11 @@
"""
Common type operations.
"""
from __future__ import annotations

from typing import (
Any,
Callable,
Union,
)
import warnings

@@ -102,7 +102,7 @@ def ensure_float(arr):
ensure_object = algos.ensure_object


def ensure_str(value: Union[bytes, Any]) -> str:
def ensure_str(value: bytes | Any) -> str:
"""
Ensure that bytes and non-strings get converted into ``str`` objects.
"""
@@ -113,7 +113,7 @@ def ensure_str(value: Union[bytes, Any]) -> str:
return value


def ensure_python_int(value: Union[int, np.integer]) -> int:
def ensure_python_int(value: int | np.integer) -> int:
"""
Ensure that a value is a python int.

1 change: 1 addition & 0 deletions pandas/core/indexes/timedeltas.py
@@ -1,4 +1,5 @@
""" implement the TimedeltaIndex """
from __future__ import annotations

from pandas._libs import (
index as libindex,
7 changes: 2 additions & 5 deletions pandas/io/parsers/python_parser.py
@@ -18,10 +18,7 @@
import numpy as np

import pandas._libs.lib as lib
from pandas._typing import (
FilePathOrBuffer,
Union,
)
from pandas._typing import FilePathOrBuffer
from pandas.errors import (
EmptyDataError,
ParserError,
@@ -42,7 +39,7 @@


class PythonParser(ParserBase):
def __init__(self, f: Union[FilePathOrBuffer, list], **kwds):
def __init__(self, f: FilePathOrBuffer | list, **kwds):
"""
Workhorse function for processing nested list into DataFrame
"""