diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 88d18e3e230c6..08a5b34167552 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -150,9 +150,15 @@ repos:
          (?x)
          \#\ type:\ (?!ignore)|
          \#\ type:\s?ignore(?!\[)|
-          \)\ ->\ \"
+
+          # string return type annotation
+          \)\ ->\ \"|
+
+          # string type annotation in generic
+          (Callable|Dict|Iterable|Iterator|List|Mapping|MutableMapping|Sequence|Set|Tuple|Type|Union)\[[^\]]*\".+?\"[^\]]*\]
       language: pygrep
       types: [python]
+      exclude: ^pandas/_typing\.py$
     - id: np-bool
       name: Check for use of np.bool instead of np.bool_
       entry: np\.bool[^_8]
diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py
index 2145551833e90..bf9fdbf82dd0e 100644
--- a/pandas/core/aggregation.py
+++ b/pandas/core/aggregation.py
@@ -3,6 +3,8 @@
 kwarg aggregations in groupby and DataFrame/Series aggregation
 """
 
+from __future__ import annotations
+
 from collections import defaultdict
 from functools import partial
 from typing import (
@@ -296,7 +298,7 @@ def relabel_result(
     func: Dict[str, List[Union[Callable, str]]],
     columns: Iterable[Hashable],
     order: Iterable[int],
-) -> Dict[Hashable, "Series"]:
+) -> Dict[Hashable, Series]:
     """
     Internal function to reorder result if relabelling is True for
     dataframe.agg, and return the reordered result in dict.
@@ -323,7 +325,7 @@ def relabel_result(
     reordered_indexes = [
         pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
     ]
-    reordered_result_in_dict: Dict[Hashable, "Series"] = {}
+    reordered_result_in_dict: Dict[Hashable, Series] = {}
     idx = 0
     reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 085ad5e6a0dcf..902ffe82c1062 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -567,7 +567,7 @@ def factorize(
     sort: bool = False,
     na_sentinel: Optional[int] = -1,
     size_hint: Optional[int] = None,
-) -> Tuple[np.ndarray, Union[np.ndarray, "Index"]]:
+) -> Tuple[np.ndarray, Union[np.ndarray, Index]]:
     """
     Encode the object as an enumerated type or categorical variable.
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 2cb8d58c7ec39..dd281a39907fd 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -72,7 +72,7 @@ def numpy_dtype(self) -> np.dtype:
         return np.dtype("bool")
 
     @classmethod
-    def construct_array_type(cls) -> Type["BooleanArray"]:
+    def construct_array_type(cls) -> Type[BooleanArray]:
         """
         Return the array type associated with this dtype.
 
@@ -94,7 +94,7 @@ def _is_numeric(self) -> bool:
         return True
 
     def __from_arrow__(
-        self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
+        self, array: Union[pyarrow.Array, pyarrow.ChunkedArray]
     ) -> BooleanArray:
         """
         Construct BooleanArray from pyarrow Array/ChunkedArray.
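Note: a minimal sketch (not part of the patch) of what the extended pygrep hook flags. The pattern is copied verbatim from the `entry` above; the sample lines are hypothetical:

    import re

    pattern = re.compile(
        r"""(?x)
        \#\ type:\ (?!ignore)|
        \#\ type:\s?ignore(?!\[)|
        # string return type annotation
        \)\ ->\ \"|
        # string type annotation in generic
        (Callable|Dict|Iterable|Iterator|List|Mapping|MutableMapping|Sequence|Set|Tuple|Type|Union)\[[^\]]*\".+?\"[^\]]*\]
        """
    )

    assert pattern.search('def f() -> "Series":')         # quoted return type
    assert pattern.search('d: Dict[str, "Series"] = {}')  # quoted name inside a generic
    assert not pattern.search("def f() -> Series: ...")   # unquoted annotation passes

The `exclude: ^pandas/_typing\.py$` line is needed because that module intentionally keeps quoted names inside generics.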
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 2c04d5f4d45c6..f3d3d61fc0ca1 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -422,7 +422,7 @@ def dtype(self) -> CategoricalDtype:
         return self._dtype
 
     @property
-    def _constructor(self) -> Type["Categorical"]:
+    def _constructor(self) -> Type[Categorical]:
         return Categorical
 
     @classmethod
diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py
index dc46cf9e3cf68..2c3b3d3c2f0b4 100644
--- a/pandas/core/arrays/floating.py
+++ b/pandas/core/arrays/floating.py
@@ -47,7 +47,7 @@ def _is_numeric(self) -> bool:
         return True
 
     @classmethod
-    def construct_array_type(cls) -> Type["FloatingArray"]:
+    def construct_array_type(cls) -> Type[FloatingArray]:
         """
         Return the array type associated with this dtype.
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index f128d2ee6c92f..ff1af80f81ac6 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -57,7 +57,7 @@ def _is_numeric(self) -> bool:
         return True
 
     @classmethod
-    def construct_array_type(cls) -> Type["IntegerArray"]:
+    def construct_array_type(cls) -> Type[IntegerArray]:
         """
         Return the array type associated with this dtype.
diff --git a/pandas/core/arrays/numeric.py b/pandas/core/arrays/numeric.py
index ed9e37bd68184..49f0d7e66c005 100644
--- a/pandas/core/arrays/numeric.py
+++ b/pandas/core/arrays/numeric.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import datetime
 import numbers
 from typing import TYPE_CHECKING, Any, List, Union
@@ -25,7 +27,7 @@ class NumericDtype(BaseMaskedDtype):
 
     def __from_arrow__(
-        self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
+        self, array: Union[pyarrow.Array, pyarrow.ChunkedArray]
     ) -> BaseMaskedArray:
         """
         Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 85dffb1113d35..787f89ab15679 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -89,7 +89,7 @@ def construct_from_string(cls, string: str) -> PandasDtype:
         return cls(dtype)
 
     @classmethod
-    def construct_array_type(cls) -> Type["PandasArray"]:
+    def construct_array_type(cls) -> Type[PandasArray]:
         """
         Return the array type associated with this dtype.
@@ -155,7 +155,7 @@ class PandasArray(
     # ------------------------------------------------------------------------
     # Constructors
 
-    def __init__(self, values: Union[np.ndarray, "PandasArray"], copy: bool = False):
+    def __init__(self, values: Union[np.ndarray, PandasArray], copy: bool = False):
         if isinstance(values, type(self)):
             values = values._ndarray
         if not isinstance(values, np.ndarray):
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 749ec0a2b8848..e2d565eb6edf1 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -201,7 +201,7 @@ def _simple_new(
 
     @classmethod
     def _from_sequence(
-        cls: Type["PeriodArray"],
+        cls: Type[PeriodArray],
         scalars: Union[Sequence[Optional[Period]], AnyArrayLike],
         *,
         dtype: Optional[Dtype] = None,
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 4cae2e48c84c8..62d6ca745b992 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -896,7 +896,7 @@ def _take_with_fill(self, indices, fill_value=None) -> np.ndarray:
 
         return taken
 
-    def _take_without_fill(self, indices) -> Union[np.ndarray, "SparseArray"]:
+    def _take_without_fill(self, indices) -> Union[np.ndarray, SparseArray]:
         to_shift = indices < 0
         indices = indices.copy()
diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
index 4e4c8f1aad671..4c1c1b42ff6fa 100644
--- a/pandas/core/arrays/sparse/dtype.py
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -173,7 +173,7 @@ def __repr__(self) -> str:
         return self.name
 
     @classmethod
-    def construct_array_type(cls) -> Type["SparseArray"]:
+    def construct_array_type(cls) -> Type[SparseArray]:
         """
         Return the array type associated with this dtype.
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 3234d36b3dbe7..2e4580207bc8a 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -71,7 +71,7 @@ def type(self) -> Type[str]:
         return str
 
     @classmethod
-    def construct_array_type(cls) -> Type["StringArray"]:
+    def construct_array_type(cls) -> Type[StringArray]:
         """
         Return the array type associated with this dtype.
 
@@ -85,7 +85,7 @@ def __repr__(self) -> str:
         return "StringDtype"
 
     def __from_arrow__(
-        self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
+        self, array: Union[pyarrow.Array, pyarrow.ChunkedArray]
     ) -> StringArray:
         """
         Construct StringArray from pyarrow Array/ChunkedArray.
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 4c073883abf89..cdca67237698b 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -87,7 +87,7 @@ def type(self) -> Type[str]:
         return str
 
     @classmethod
-    def construct_array_type(cls) -> Type["ArrowStringArray"]:
+    def construct_array_type(cls) -> Type[ArrowStringArray]:
         """
         Return the array type associated with this dtype.
 
@@ -104,7 +104,7 @@ def __repr__(self) -> str:
         return "ArrowStringDtype"
 
     def __from_arrow__(
-        self, array: Union["pa.Array", "pa.ChunkedArray"]
+        self, array: Union[pa.Array, pa.ChunkedArray]
     ) -> ArrowStringArray:
         """
         Construct StringArray from pyarrow Array/ChunkedArray.
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index 74bee80c6c8a6..7b42b21cadc1f 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -2,6 +2,8 @@
 Operator classes for eval.
""" +from __future__ import annotations + from datetime import datetime from distutils.version import LooseVersion from functools import partial @@ -203,7 +205,7 @@ class Op: op: str - def __init__(self, op: str, operands: Iterable[Union[Term, "Op"]], encoding=None): + def __init__(self, op: str, operands: Iterable[Union[Term, Op]], encoding=None): self.op = _bool_op_map.get(op, op) self.operands = operands self.encoding = encoding diff --git a/pandas/core/describe.py b/pandas/core/describe.py index 22de5ae1e082f..fa3fbc865c5bd 100644 --- a/pandas/core/describe.py +++ b/pandas/core/describe.py @@ -164,7 +164,7 @@ def describe_frame( return d -def reorder_columns(ldesc: Sequence["Series"]) -> List[Hashable]: +def reorder_columns(ldesc: Sequence[Series]) -> List[Hashable]: """Set a convenient order for rows for display.""" names: List[Hashable] = [] ldesc_indexes = sorted((x.index for x in ldesc), key=len) diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index 6adb4984d156e..0c0c56ff39280 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -2,6 +2,8 @@ Extend pandas with custom array types. """ +from __future__ import annotations + from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union import numpy as np @@ -186,7 +188,7 @@ def names(self) -> Optional[List[str]]: return None @classmethod - def construct_array_type(cls) -> Type["ExtensionArray"]: + def construct_array_type(cls) -> Type[ExtensionArray]: """ Return the array type associated with this dtype. diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 0941967ef6bee..90513cf0550b2 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -397,7 +397,7 @@ def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj: def maybe_cast_to_extension_array( - cls: Type["ExtensionArray"], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None + cls: Type[ExtensionArray], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None ) -> ArrayLike: """ Call to `_from_sequence` that returns the object unchanged on Exception. diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index cefab33976ba8..ac02abc64cfa4 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -59,7 +59,7 @@ class PandasExtensionDtype(ExtensionDtype): base = None isbuiltin = 0 isnative = 0 - _cache: Dict[str_type, "PandasExtensionDtype"] = {} + _cache: Dict[str_type, PandasExtensionDtype] = {} def __str__(self) -> str_type: """ @@ -445,7 +445,7 @@ def _hash_categories(categories, ordered: Ordered = True) -> int: return np.bitwise_xor.reduce(hashed) @classmethod - def construct_array_type(cls) -> Type["Categorical"]: + def construct_array_type(cls) -> Type[Categorical]: """ Return the array type associated with this dtype. 
@@ -514,7 +514,7 @@ def validate_categories(categories, fastpath: bool = False):
         return categories
 
     def update_dtype(
-        self, dtype: Union[str_type, "CategoricalDtype"]
+        self, dtype: Union[str_type, CategoricalDtype]
     ) -> CategoricalDtype:
         """
         Returns a CategoricalDtype with categories and ordered taken from dtype
@@ -647,7 +647,7 @@ class DatetimeTZDtype(PandasExtensionDtype):
     _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
     _cache: Dict[str_type, PandasExtensionDtype] = {}
 
-    def __init__(self, unit: Union[str_type, "DatetimeTZDtype"] = "ns", tz=None):
+    def __init__(self, unit: Union[str_type, DatetimeTZDtype] = "ns", tz=None):
         if isinstance(unit, DatetimeTZDtype):
             # error: "str" has no attribute "tz"
             unit, tz = unit.unit, unit.tz  # type: ignore[attr-defined]
@@ -694,7 +694,7 @@ def tz(self):
         return self._tz
 
     @classmethod
-    def construct_array_type(cls) -> Type["DatetimeArray"]:
+    def construct_array_type(cls) -> Type[DatetimeArray]:
         """
         Return the array type associated with this dtype.
 
@@ -940,7 +940,7 @@ def is_dtype(cls, dtype: object) -> bool:
         return super().is_dtype(dtype)
 
     @classmethod
-    def construct_array_type(cls) -> Type["PeriodArray"]:
+    def construct_array_type(cls) -> Type[PeriodArray]:
         """
         Return the array type associated with this dtype.
 
@@ -953,7 +953,7 @@ def construct_array_type(cls) -> Type["PeriodArray"]:
         return PeriodArray
 
     def __from_arrow__(
-        self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
+        self, array: Union[pyarrow.Array, pyarrow.ChunkedArray]
     ) -> PeriodArray:
         """
         Construct PeriodArray from pyarrow Array/ChunkedArray.
@@ -1090,7 +1090,7 @@ def subtype(self):
         return self._subtype
 
     @classmethod
-    def construct_array_type(cls) -> Type["IntervalArray"]:
+    def construct_array_type(cls) -> Type[IntervalArray]:
         """
         Return the array type associated with this dtype.
@@ -1184,7 +1184,7 @@ def is_dtype(cls, dtype: object) -> bool:
         return super().is_dtype(dtype)
 
     def __from_arrow__(
-        self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
+        self, array: Union[pyarrow.Array, pyarrow.ChunkedArray]
     ) -> IntervalArray:
         """
         Construct IntervalArray from pyarrow Array/ChunkedArray.
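Note: the unquoted `Type[Categorical]`-style forward references above rely on PEP 563 (`from __future__ import annotations`), which stores every annotation as a string and resolves it only on demand. A stripped-down sketch with hypothetical names mirroring the dtype pattern:

    from __future__ import annotations

    from typing import Type, get_type_hints

    class MyDtype:
        @classmethod
        def construct_array_type(cls) -> Type[MyArray]:  # MyArray is defined later
            return MyArray

    class MyArray:
        pass

    # Stored as a plain string; evaluated only when someone asks for it:
    assert MyDtype.construct_array_type.__annotations__["return"] == "Type[MyArray]"
    assert get_type_hints(MyDtype.construct_array_type)["return"] == Type[MyArray]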
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ffc84ad94459a..c3cfea93d0cc7 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9440,8 +9440,8 @@ def resample(
         base: Optional[int] = None,
         on=None,
         level=None,
-        origin: Union[str, "TimestampConvertibleTypes"] = "start_day",
-        offset: Optional["TimedeltaConvertibleTypes"] = None,
+        origin: Union[str, TimestampConvertibleTypes] = "start_day",
+        offset: Optional[TimedeltaConvertibleTypes] = None,
     ) -> Resampler:
         return super().resample(
             rule=rule,
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 2bcd5964d3736..50dbfe2596a77 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1690,7 +1690,7 @@ def _wrap_transformed_output(
 
         return result
 
-    def _wrap_agged_blocks(self, blocks: Sequence["Block"], items: Index) -> DataFrame:
+    def _wrap_agged_blocks(self, blocks: Sequence[Block], items: Index) -> DataFrame:
         if not self.as_index:
             index = np.arange(blocks[0].values.shape[-1])
             mgr = BlockManager(blocks, axes=[items, index])
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 35a3768be7e73..f41a46456b36a 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1371,7 +1371,7 @@ class GroupBy(BaseGroupBy[FrameOrSeries]):
 
     @final
     @property
-    def _obj_1d_constructor(self) -> Type["Series"]:
+    def _obj_1d_constructor(self) -> Type[Series]:
         # GH28330 preserve subclassed Series/DataFrames
         if isinstance(self.obj, DataFrame):
             return self.obj._constructor_sliced
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 00cc762c7c136..c7dc6d021a4c3 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -2,6 +2,8 @@
 Provide user facing operators for doing the split part of the
 split-apply-combine paradigm.
 """
+from __future__ import annotations
+
 from typing import Dict, Hashable, List, Optional, Set, Tuple
 import warnings
@@ -616,7 +618,7 @@ def get_grouper(
     mutated: bool = False,
     validate: bool = True,
     dropna: bool = True,
-) -> Tuple["ops.BaseGrouper", Set[Hashable], FrameOrSeries]:
+) -> Tuple[ops.BaseGrouper, Set[Hashable], FrameOrSeries]:
    """
    Create and return a BaseGrouper, which is an internal
    mapping of how to create the grouper indexers.
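Note: many of the names unquoted in this patch (`ops.BaseGrouper`, `Block`, `TimestampConvertibleTypes`, ...) are imported only under `TYPE_CHECKING`, so they do not exist at runtime; PEP 563 makes that safe because annotations are never evaluated on import. A self-contained sketch with `Decimal` standing in for such a checking-only import:

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Seen by the type checker only; never imported at runtime,
        # which also side-steps circular imports between modules.
        from decimal import Decimal

    def halve(x: Decimal) -> Decimal:  # unquoted despite no runtime import
        return x / 2

    assert halve.__annotations__ == {"x": "Decimal", "return": "Decimal"}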
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 517b848742541..1b1406fe9cd0f 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -100,7 +100,7 @@ class BaseGrouper:
     def __init__(
         self,
         axis: Index,
-        groupings: Sequence["grouper.Grouping"],
+        groupings: Sequence[grouper.Grouping],
         sort: bool = True,
         group_keys: bool = True,
         mutated: bool = False,
@@ -119,7 +119,7 @@ def __init__(
         self.dropna = dropna
 
     @property
-    def groupings(self) -> List["grouper.Grouping"]:
+    def groupings(self) -> List[grouper.Grouping]:
         return self._groupings
 
     @property
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 0b46b43514d92..9d84d391a07cb 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3455,7 +3455,7 @@ def _get_nearest_indexer(self, target: "Index", limit, tolerance) -> np.ndarray:
 
     @final
     def _filter_indexer_tolerance(
         self,
-        target: Union["Index", np.ndarray, ExtensionArray],
+        target: Union[Index, np.ndarray, ExtensionArray],
         indexer: np.ndarray,
         tolerance,
     ) -> np.ndarray:
@@ -4519,7 +4519,7 @@ def append(self, other):
 
         return self._concat(to_concat, name)
 
-    def _concat(self, to_concat: List["Index"], name: Hashable) -> Index:
+    def _concat(self, to_concat: List[Index], name: Hashable) -> Index:
         """
         Concatenate multiple Index objects.
         """
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index a8a872ff38fb8..3b274920018d9 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -605,7 +605,7 @@ def map(self, mapper):
         mapped = self._values.map(mapper)
         return Index(mapped, name=self.name)
 
-    def _concat(self, to_concat: List["Index"], name: Hashable) -> Index:
+    def _concat(self, to_concat: List[Index], name: Hashable) -> Index:
         # if calling index is category, don't check dtype of others
         try:
             codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat])
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 7a178a29b2fd6..0df954e054826 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -404,7 +404,7 @@ def union_many(self, others):
             return this.rename(res_name)
         return this
 
-    def _maybe_utc_convert(self, other: Index) -> Tuple["DatetimeIndex", Index]:
+    def _maybe_utc_convert(self, other: Index) -> Tuple[DatetimeIndex, Index]:
         this = self
 
         if isinstance(other, DatetimeIndex):
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 5bd406bfdbc55..267654076127b 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -472,7 +472,7 @@ def argsort(self, *args, **kwargs) -> np.ndarray:
 
     def factorize(
         self, sort: bool = False, na_sentinel: Optional[int] = -1
-    ) -> Tuple[np.ndarray, "RangeIndex"]:
+    ) -> Tuple[np.ndarray, RangeIndex]:
         codes = np.arange(len(self), dtype=np.intp)
         uniques = self
         if sort and self.step < 0:
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 1356b9d3b2ca3..28405e6eeacb3 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -370,7 +370,7 @@ def delete(self, loc) -> None:
         self.values = np.delete(self.values, loc, 0)
         self.mgr_locs = self.mgr_locs.delete(loc)
 
-    def apply(self, func, **kwargs) -> List["Block"]:
+    def apply(self, func, **kwargs) -> List[Block]:
         """
         apply the function to my values; return a block if we are not
         one
@@ -380,7 +380,7 @@ def apply(self, func, **kwargs) -> List["Block"]:
 
         return self._split_op_result(result)
 
-    def reduce(self, func, ignore_failures: bool = False) -> List["Block"]:
+    def reduce(self, func, ignore_failures: bool = False) -> List[Block]:
         # We will apply the function and reshape the result into a single-row
         # Block with the same mgr_locs; squeezing will be done at a higher level
         assert self.ndim == 2
@@ -401,7 +401,7 @@ def reduce(self, func, ignore_failures: bool = False) -> List["Block"]:
         nb = self.make_block(res_values)
         return [nb]
 
-    def _split_op_result(self, result) -> List["Block"]:
+    def _split_op_result(self, result) -> List[Block]:
         # See also: split_and_operate
         if is_extension_array_dtype(result) and result.ndim > 1:
             # TODO(EA2D): unnecessary with 2D EAs
@@ -420,7 +420,7 @@ def _split_op_result(self, result) -> List["Block"]:
 
     def fillna(
         self, value, limit=None, inplace: bool = False, downcast=None
-    ) -> List["Block"]:
+    ) -> List[Block]:
         """
         fillna on the block with the value. If we fail, then convert to
         ObjectBlock and try again
@@ -461,7 +461,7 @@ def f(mask, val, idx):
 
         return self.split_and_operate(None, f, inplace)
 
-    def _split(self) -> List["Block"]:
+    def _split(self) -> List[Block]:
         """
         Split a block into a list of single-column blocks.
         """
@@ -477,7 +477,7 @@ def _split(self) -> List["Block"]:
 
     def split_and_operate(
         self, mask, f, inplace: bool, ignore_failures: bool = False
-    ) -> List["Block"]:
+    ) -> List[Block]:
         """
         split the block per-column, and apply the callable f
         per-column, return a new block for each. Handle
@@ -545,7 +545,7 @@ def make_a_block(nv, ref_loc):
 
         return new_blocks
 
-    def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]:
+    def _maybe_downcast(self, blocks: List[Block], downcast=None) -> List[Block]:
 
         # no need to downcast our float
         # unless indicated
@@ -554,7 +554,7 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]
 
         return extend_blocks([b.downcast(downcast) for b in blocks])
 
-    def downcast(self, dtypes=None) -> List["Block"]:
+    def downcast(self, dtypes=None) -> List[Block]:
         """ try to downcast each item to the dict of dtypes if present """
         # turn it off completely
         if dtypes is False:
@@ -670,7 +670,7 @@ def convert(
         datetime: bool = True,
         numeric: bool = True,
         timedelta: bool = True,
-    ) -> List["Block"]:
+    ) -> List[Block]:
         """
         attempt to coerce any object types to better types return a copy
         of the block (if copy = True) by definition we are not an ObjectBlock
@@ -728,7 +728,7 @@ def replace(
         value,
         inplace: bool = False,
         regex: bool = False,
-    ) -> List["Block"]:
+    ) -> List[Block]:
         """
         replace the to_replace value with value, possible to create new
         blocks here this is just a call to putmask. regex is not used here.
@@ -773,7 +773,7 @@ def _replace_regex(
         inplace: bool = False,
         convert: bool = True,
         mask=None,
-    ) -> List["Block"]:
+    ) -> List[Block]:
         """
         Replace elements by the given value.
@@ -817,7 +817,7 @@ def _replace_list(
         dest_list: List[Any],
         inplace: bool = False,
         regex: bool = False,
-    ) -> List["Block"]:
+    ) -> List[Block]:
         """
         See BlockManager._replace_list docstring.
""" @@ -854,7 +854,7 @@ def comp(s: Scalar, mask: np.ndarray, regex: bool = False) -> np.ndarray: rb = [self if inplace else self.copy()] for i, (src, dest) in enumerate(pairs): - new_rb: List["Block"] = [] + new_rb: List[Block] = [] for blk in rb: m = masks[i] convert = i == src_len # only convert once at the end @@ -998,7 +998,7 @@ def setitem(self, indexer, value): block = self.make_block(values) return block - def putmask(self, mask, new, axis: int = 0) -> List["Block"]: + def putmask(self, mask, new, axis: int = 0) -> List[Block]: """ putmask the data to the block; it is possible that we may create a new dtype of block @@ -1095,7 +1095,7 @@ def interpolate( self, method: str = "pad", axis: int = 0, - index: Optional["Index"] = None, + index: Optional[Index] = None, inplace: bool = False, limit: Optional[int] = None, limit_direction: str = "forward", @@ -1157,7 +1157,7 @@ def _interpolate_with_fill( limit: Optional[int] = None, limit_area: Optional[str] = None, downcast: Optional[str] = None, - ) -> List["Block"]: + ) -> List[Block]: """ fillna but using the interpolate machinery """ inplace = validate_bool_kwarg(inplace, "inplace") @@ -1188,7 +1188,7 @@ def _interpolate( inplace: bool = False, downcast: Optional[str] = None, **kwargs, - ) -> List["Block"]: + ) -> List[Block]: """ interpolate using scipy wrappers """ inplace = validate_bool_kwarg(inplace, "inplace") data = self.values if inplace else self.values.copy() @@ -1264,7 +1264,7 @@ def take_nd(self, indexer, axis: int, new_mgr_locs=None, fill_value=lib.no_defau else: return self.make_block_same_class(new_values, new_mgr_locs) - def diff(self, n: int, axis: int = 1) -> List["Block"]: + def diff(self, n: int, axis: int = 1) -> List[Block]: """ return block for the diff of the values """ new_values = algos.diff(self.values, n, axis=axis, stacklevel=7) return [self.make_block(values=new_values)] @@ -1279,7 +1279,7 @@ def shift(self, periods: int, axis: int = 0, fill_value=None): return [self.make_block(new_values)] - def where(self, other, cond, errors="raise", axis: int = 0) -> List["Block"]: + def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: """ evaluate the block; return result block(s) from the result @@ -1355,7 +1355,7 @@ def where(self, other, cond, errors="raise", axis: int = 0) -> List["Block"]: cond = cond.swapaxes(axis, 0) mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool) - result_blocks: List["Block"] = [] + result_blocks: List[Block] = [] for m in [mask, ~mask]: if m.any(): result = cast(np.ndarray, result) # EABlock overrides where @@ -1459,7 +1459,7 @@ def _replace_coerce( mask: np.ndarray, inplace: bool = True, regex: bool = False, - ) -> List["Block"]: + ) -> List[Block]: """ Replace value corresponding to the given boolean array with another value. @@ -1577,7 +1577,7 @@ def set_inplace(self, locs, values): assert locs.tolist() == [0] self.values = values - def putmask(self, mask, new, axis: int = 0) -> List["Block"]: + def putmask(self, mask, new, axis: int = 0) -> List[Block]: """ See Block.putmask.__doc__ """ @@ -1779,7 +1779,7 @@ def interpolate( placement=self.mgr_locs, ) - def diff(self, n: int, axis: int = 1) -> List["Block"]: + def diff(self, n: int, axis: int = 1) -> List[Block]: if axis == 0 and n != 0: # n==0 case will be a no-op so let is fall through # Since we only have one column, the result will be all-NA. 
@@ -1794,7 +1794,7 @@ def diff(self, n: int, axis: int = 1) -> List["Block"]:
 
     def shift(
         self, periods: int, axis: int = 0, fill_value: Any = None
-    ) -> List["ExtensionBlock"]:
+    ) -> List[ExtensionBlock]:
         """
         Shift the block by `periods`.
@@ -1809,7 +1809,7 @@ def shift(
             )
         ]
 
-    def where(self, other, cond, errors="raise", axis: int = 0) -> List["Block"]:
+    def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]:
         cond = _extract_bool_array(cond)
 
         assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame))
@@ -1999,7 +1999,7 @@ def iget(self, key):
         # TODO(EA2D): this can be removed if we ever have 2D EA
         return self.array_values().reshape(self.shape)[key]
 
-    def diff(self, n: int, axis: int = 0) -> List["Block"]:
+    def diff(self, n: int, axis: int = 0) -> List[Block]:
         """
         1st discrete difference.
@@ -2040,7 +2040,7 @@ def to_native_types(self, na_rep="NaT", **kwargs):
         result = arr._format_native_types(na_rep=na_rep, **kwargs)
         return self.make_block(result)
 
-    def where(self, other, cond, errors="raise", axis: int = 0) -> List["Block"]:
+    def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]:
         # TODO(EA2D): reshape unnecessary with 2D EAs
         arr = self.array_values().reshape(self.shape)
@@ -2329,7 +2329,7 @@ def convert(
         datetime: bool = True,
         numeric: bool = True,
         timedelta: bool = True,
-    ) -> List["Block"]:
+    ) -> List[Block]:
         """
         attempt to cast any object types to better types return a copy of
         the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
@@ -2359,7 +2359,7 @@ def f(mask, val, idx):
 
         return blocks
 
-    def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]:
+    def _maybe_downcast(self, blocks: List[Block], downcast=None) -> List[Block]:
 
         if downcast is not None:
             return blocks
@@ -2376,7 +2376,7 @@ def replace(
         value,
         inplace: bool = False,
         regex: bool = False,
-    ) -> List["Block"]:
+    ) -> List[Block]:
         # Note: the checks we do in NDFrame.replace ensure we never get
         # here with listlike to_replace or value, as those cases
         # go through _replace_list
@@ -2412,7 +2412,7 @@ def _replace_list(
         dest_list: List[Any],
         inplace: bool = False,
         regex: bool = False,
-    ) -> List["Block"]:
+    ) -> List[Block]:
         if len(algos.unique(dest_list)) == 1:
             # We likely got here by tiling value inside NDFrame.replace,
             # so un-tile here
@@ -2425,7 +2425,7 @@ def replace(
         value,
         inplace: bool = False,
         regex: bool = False,
-    ) -> List["Block"]:
+    ) -> List[Block]:
         inplace = validate_bool_kwarg(inplace, "inplace")
         result = self if inplace else self.copy()
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 32b6f9d64dd8d..0b611bfdb1f10 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from collections import defaultdict
 import copy
 import itertools
@@ -36,7 +38,7 @@ def concatenate_block_managers(
-    mgrs_indexers, axes: List["Index"], concat_axis: int, copy: bool
+    mgrs_indexers, axes: List[Index], concat_axis: int, copy: bool
 ) -> Manager:
     """
     Concatenate block managers into one.
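Note: the many `List[Block]` returns inside `class Block` itself are the classic self-reference case; before PEP 563 they had to be quoted because the class name is not yet bound while its own body executes. A minimal sketch:

    from __future__ import annotations

    from typing import List

    class Block:
        def _split(self) -> List[Block]:  # self-reference, no quotes needed
            return [self]

    assert isinstance(Block()._split()[0], Block)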
diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py
index 562740a275acb..829a60c1889a8 100644
--- a/pandas/core/internals/ops.py
+++ b/pandas/core/internals/ops.py
@@ -51,7 +51,7 @@ def operate_blockwise(
     # At this point we have already checked the parent DataFrames for
     # assert rframe._indexed_same(lframe)
 
-    res_blks: List["Block"] = []
+    res_blks: List[Block] = []
     for lvals, rvals, locs, left_ea, right_ea, rblk in _iter_block_pairs(left, right):
         res_values = array_op(lvals, rvals)
         if left_ea and not right_ea and hasattr(res_values, "reshape"):
@@ -79,7 +79,7 @@ def operate_blockwise(
 
     return new_mgr
 
 
-def _reset_block_mgr_locs(nbs: List["Block"], locs):
+def _reset_block_mgr_locs(nbs: List[Block], locs):
     """
     Reset mgr_locs to correspond to our original DataFrame.
     """
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 7e6ff6ae358bb..f9bff603cec38 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -52,7 +52,7 @@
 
 @overload
 def concat(
-    objs: Union[Iterable["DataFrame"], Mapping[Hashable, "DataFrame"]],
+    objs: Union[Iterable[DataFrame], Mapping[Hashable, DataFrame]],
     axis=0,
     join: str = "outer",
     ignore_index: bool = False,
@@ -68,7 +68,7 @@ def concat(
 
 @overload
 def concat(
-    objs: Union[Iterable["NDFrame"], Mapping[Hashable, "NDFrame"]],
+    objs: Union[Iterable[NDFrame], Mapping[Hashable, NDFrame]],
     axis=0,
     join: str = "outer",
     ignore_index: bool = False,
@@ -83,7 +83,7 @@ def concat(
 
 def concat(
-    objs: Union[Iterable["NDFrame"], Mapping[Hashable, "NDFrame"]],
+    objs: Union[Iterable[NDFrame], Mapping[Hashable, NDFrame]],
     axis=0,
     join="outer",
     ignore_index: bool = False,
@@ -308,7 +308,7 @@ class _Concatenator:
 
     def __init__(
         self,
-        objs: Union[Iterable["NDFrame"], Mapping[Hashable, "NDFrame"]],
+        objs: Union[Iterable[NDFrame], Mapping[Hashable, NDFrame]],
         axis=0,
         join: str = "outer",
         keys=None,
@@ -377,7 +377,7 @@ def __init__(
         # get the sample
         # want the highest ndim that we have, and must be non-empty
         # unless all objs are empty
-        sample: Optional["NDFrame"] = None
+        sample: Optional[NDFrame] = None
         if len(ndims) > 1:
             max_ndim = max(ndims)
             for obj in objs:
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 18e3f3a48afdb..a3eef92bacfad 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -724,7 +724,7 @@ def _maybe_drop_cross_column(self, result: DataFrame, cross_col: Optional[str]):
 
     def _indicator_pre_merge(
         self, left: DataFrame, right: DataFrame
-    ) -> Tuple["DataFrame", "DataFrame"]:
+    ) -> Tuple[DataFrame, DataFrame]:
 
         columns = left.columns.union(right.columns)
@@ -1232,7 +1232,7 @@ def _maybe_coerce_merge_keys(self):
 
     def _create_cross_configuration(
         self, left: DataFrame, right: DataFrame
-    ) -> Tuple["DataFrame", "DataFrame", str, str]:
+    ) -> Tuple[DataFrame, DataFrame, str, str]:
         """
         Creates the configuration to dispatch the cross operation to inner join,
        e.g. adding a join column and resetting parameters.
        Join column is added
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 3888194305d76..fea9b49fa9e97 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -211,8 +211,8 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
     )
     __hash__ = generic.NDFrame.__hash__
     _mgr: SingleBlockManager
-    div: Callable[["Series", Any], "Series"]
-    rdiv: Callable[["Series", Any], "Series"]
+    div: Callable[[Series, Any], Series]
+    rdiv: Callable[[Series, Any], Series]
 
     # ----------------------------------------------------------------------
     # Constructors
@@ -398,11 +398,11 @@ def _init_dict(self, data, index=None, dtype: Optional[Dtype] = None):
     # ----------------------------------------------------------------------
 
     @property
-    def _constructor(self) -> Type["Series"]:
+    def _constructor(self) -> Type[Series]:
         return Series
 
     @property
-    def _constructor_expanddim(self) -> Type["DataFrame"]:
+    def _constructor_expanddim(self) -> Type[DataFrame]:
         from pandas.core.frame import DataFrame
 
         return DataFrame
@@ -1850,7 +1850,7 @@ def unique(self):
         result = super().unique()
         return result
 
-    def drop_duplicates(self, keep="first", inplace=False) -> Optional["Series"]:
+    def drop_duplicates(self, keep="first", inplace=False) -> Optional[Series]:
         """
         Return Series with duplicate values removed.
@@ -2709,7 +2709,7 @@ def _binop(self, other, func, level=None, fill_value=None):
 
     def _construct_result(
         self, result: Union[ArrayLike, Tuple[ArrayLike, ArrayLike]], name: Hashable
-    ) -> Union["Series", Tuple["Series", "Series"]]:
+    ) -> Union[Series, Tuple[Series, Series]]:
         """
         Construct an appropriately-labelled Series from the result of an op.
@@ -4401,7 +4401,7 @@ def fillna(
         inplace=False,
         limit=None,
         downcast=None,
-    ) -> Optional["Series"]:
+    ) -> Optional[Series]:
         return super().fillna(
             value=value,
             method=method,
@@ -4859,8 +4859,8 @@ def resample(
         base: Optional[int] = None,
         on=None,
         level=None,
-        origin: Union[str, "TimestampConvertibleTypes"] = "start_day",
-        offset: Optional["TimedeltaConvertibleTypes"] = None,
+        origin: Union[str, TimestampConvertibleTypes] = "start_day",
+        offset: Optional[TimedeltaConvertibleTypes] = None,
     ) -> Resampler:
         return super().resample(
             rule=rule,
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 8869533be30fb..da0cafb409e56 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -517,7 +517,7 @@ def ensure_key_mapped(values, key: Optional[Callable], levels=None):
 
 def get_flattened_list(
     comp_ids: np.ndarray,
     ngroups: int,
-    levels: Iterable["Index"],
+    levels: Iterable[Index],
     labels: Iterable[np.ndarray],
 ) -> List[Tuple]:
     """Map compressed group id -> key tuple."""
@@ -532,7 +532,7 @@ def get_flattened_list(
 
 def get_indexer_dict(
-    label_list: List[np.ndarray], keys: List["Index"]
+    label_list: List[np.ndarray], keys: List[Index]
 ) -> Dict[Union[str, Tuple], np.ndarray]:
     """
     Returns
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 88ce5865ee8c2..47df65650237e 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -68,13 +68,13 @@
     from pandas import Series
 
-# ---------------------------------------------------------------------
-# types used in annotations
+    # ---------------------------------------------------------------------
+    # types used in annotations
 
-ArrayConvertible = Union[List, Tuple, AnyArrayLike, "Series"]
-Scalar = Union[int, float, str]
-DatetimeScalar = TypeVar("DatetimeScalar", Scalar, datetime)
-DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible]
+    ArrayConvertible = Union[List, Tuple, AnyArrayLike, Series]
+    Scalar = Union[int, float, str]
+    DatetimeScalar = TypeVar("DatetimeScalar", Scalar, datetime)
+    DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible]
 
 # ---------------------------------------------------------------------
@@ -571,7 +571,7 @@ def to_datetime(
     infer_datetime_format: bool = ...,
     origin=...,
     cache: bool = ...,
-) -> Union[DatetimeScalar, "NaTType"]:
+) -> Union[DatetimeScalar, NaTType]:
     ...
 
 
@@ -621,7 +621,7 @@ def to_datetime(
     infer_datetime_format: bool = False,
     origin="unix",
     cache: bool = True,
-) -> Union[DatetimeIndex, "Series", DatetimeScalar, "NaTType"]:
+) -> Union[DatetimeIndex, Series, DatetimeScalar, NaTType]:
     """
     Convert argument to datetime.
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 962ba2c7f9ef7..4fca057976277 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import abc
 import datetime
 from distutils.version import LooseVersion
@@ -789,7 +791,7 @@ def save(self):
 
     def __init__(
         self,
-        path: Union[FilePathOrBuffer, "ExcelWriter"],
+        path: Union[FilePathOrBuffer, ExcelWriter],
         engine=None,
         date_format=None,
         datetime_format=None,
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index 7de958df206d5..71e1bf6b43ad5 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from typing import TYPE_CHECKING, Dict, List, Optional
 
 import numpy as np
@@ -51,7 +53,7 @@ def save(self):
         self.book.save(self.handles.handle)
 
     @classmethod
-    def _convert_to_style_kwargs(cls, style_dict: dict) -> Dict[str, "Serialisable"]:
+    def _convert_to_style_kwargs(cls, style_dict: dict) -> Dict[str, Serialisable]:
         """
         Convert a style_dict to a set of kwargs suitable for initializing
         or updating-on-copy an openpyxl v2 style object.
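Note: unlike annotations, a module-level type alias is an ordinary assignment that is evaluated eagerly at import time, so PEP 563 does not defer it; that is presumably why the alias block in datetimes.py moves under the `if TYPE_CHECKING:` block where the unquoted `Series` is visible. A condensed sketch (simplified alias, hypothetical function):

    from __future__ import annotations

    from typing import TYPE_CHECKING, List, Tuple, Union

    if TYPE_CHECKING:
        from pandas import Series

        # Evaluated by the type checker only; at runtime this line never runs.
        ArrayConvertible = Union[List, Tuple, Series]

    def coerce(arg: ArrayConvertible) -> Series:  # annotation is deferred: fine
        ...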
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 74756b0c57092..5a7a78b75bb0f 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -91,7 +91,7 @@ def na_rep(self) -> str:
         return self.fmt.na_rep
 
     @property
-    def float_format(self) -> Optional["FloatFormatType"]:
+    def float_format(self) -> Optional[FloatFormatType]:
         return self.fmt.float_format
 
     @property
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 2c17551a7c3b9..68d068df601ff 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1502,7 +1502,7 @@ def _format_strings(self) -> List[str]:
 
 class Datetime64Formatter(GenericArrayFormatter):
     def __init__(
         self,
-        values: Union[np.ndarray, "Series", DatetimeIndex, DatetimeArray],
+        values: Union[np.ndarray, Series, DatetimeIndex, DatetimeArray],
         nat_rep: str = "NaT",
         date_format: None = None,
         **kwargs,
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 782562f455607..03e65029fb021 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -828,7 +828,7 @@ def _compute(self):
 
     def _apply(
         self,
-        func: Callable[..., "Styler"],
+        func: Callable[..., Styler],
         axis: Optional[Axis] = 0,
         subset=None,
         **kwargs,
@@ -867,7 +867,7 @@ def _apply(
 
     def apply(
         self,
-        func: Callable[..., "Styler"],
+        func: Callable[..., Styler],
         axis: Optional[Axis] = 0,
         subset=None,
         **kwargs,
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 0791599dad201..e53c828fe30cb 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -74,7 +74,7 @@ def to_json(
     if orient == "table" and isinstance(obj, Series):
         obj = obj.to_frame(name=obj.name or "values")
 
-    writer: Type["Writer"]
+    writer: Type[Writer]
     if orient == "table" and isinstance(obj, DataFrame):
         writer = JSONTableWriter
     elif isinstance(obj, Series):
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 0225811d95244..bd40387e13c90 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -529,7 +529,7 @@ class HDFStore:
     >>> store.close()  # only now, data is written to disk
     """
 
-    _handle: Optional["File"]
+    _handle: Optional[File]
     _mode: str
     _complevel: int
     _fletcher32: bool
@@ -1471,7 +1471,7 @@ def walk(self, where="/"):
 
             yield (g._v_pathname.rstrip("/"), groups, leaves)
 
-    def get_node(self, key: str) -> Optional["Node"]:
+    def get_node(self, key: str) -> Optional[Node]:
         """ return the node with the key or None if it does not exist """
         self._check_if_open()
         if not key.startswith("/"):
@@ -1487,7 +1487,7 @@ def get_node(self, key: str) -> Optional["Node"]:
         assert isinstance(node, _table_mod.Node), type(node)
         return node
 
-    def get_storer(self, key: str) -> Union["GenericFixed", "Table"]:
+    def get_storer(self, key: str) -> Union[GenericFixed, Table]:
         """ return the storer object for a key, raise if not in the file """
         group = self.get_node(key)
         if group is None:
@@ -1621,9 +1621,9 @@ def _create_storer(
         value: Optional[FrameOrSeries] = None,
         encoding: str = "UTF-8",
         errors: str = "strict",
-    ) -> Union["GenericFixed", "Table"]:
+    ) -> Union[GenericFixed, Table]:
         """ return a suitable class to operate """
-        cls: Union[Type["GenericFixed"], Type["Table"]]
+        cls: Union[Type[GenericFixed], Type[Table]]
 
         if value is not None and not isinstance(value, (Series, DataFrame)):
             raise TypeError("value must be None, Series, or DataFrame")
@@ -1835,12 +1835,12 @@ class TableIterator:
 
     chunksize: Optional[int]
     store: HDFStore
-    s: Union["GenericFixed", "Table"]
+    s: Union[GenericFixed, Table]
 
     def __init__(
         self,
         store: HDFStore,
-        s: Union["GenericFixed", "Table"],
+        s: Union[GenericFixed, Table],
         func,
         where,
         nrows,
@@ -2363,7 +2363,7 @@ def get_atom_string(cls, shape, itemsize):
         return _tables().StringCol(itemsize=itemsize, shape=shape[0])
 
     @classmethod
-    def get_atom_coltype(cls, kind: str) -> Type["Col"]:
+    def get_atom_coltype(cls, kind: str) -> Type[Col]:
         """ return the PyTables column class for this column """
         if kind.startswith("uint"):
             k4 = kind[4:]
@@ -3989,7 +3989,7 @@ def get_blk_items(mgr):
 
         mgr = frame._mgr
         mgr = cast(BlockManager, mgr)
-        blocks: List["Block"] = list(mgr.blocks)
+        blocks: List[Block] = list(mgr.blocks)
         blk_items: List[Index] = get_blk_items(mgr)
 
         if len(data_columns):
@@ -4010,7 +4010,7 @@ def get_blk_items(mgr):
             tuple(b_items.tolist()): (b, b_items)
             for b, b_items in zip(blocks, blk_items)
         }
-        new_blocks: List["Block"] = []
+        new_blocks: List[Block] = []
         new_blk_items = []
         for ea in values_axes:
             items = tuple(ea.values)
@@ -4906,7 +4906,7 @@ def _maybe_convert_for_string_atom(
     elif not (inferred_type == "string" or dtype_name == "object"):
         return block.values
 
-    blocks: List["Block"] = block.fillna(nan_rep, downcast=False)
+    blocks: List[Block] = block.fillna(nan_rep, downcast=False)
     # Note: because block is always object dtype, fillna goes
     # through a path such that the result is always a 1-element list
     assert len(blocks) == 1
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index d193971a6721e..8888be02dd5ea 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -1,6 +1,8 @@
 """
 Read SAS sas7bdat or xport files.
 """
+from __future__ import annotations
+
 from abc import ABCMeta, abstractmethod
 from typing import TYPE_CHECKING, Hashable, Optional, Union, overload
@@ -53,7 +55,7 @@ def read_sas(
     encoding: Optional[str] = ...,
     chunksize: None = ...,
     iterator: bool = ...,
-) -> Union["DataFrame", ReaderBase]:
+) -> Union[DataFrame, ReaderBase]:
     ...
 
 
@@ -64,7 +66,7 @@ def read_sas(
     encoding: Optional[str] = None,
     chunksize: Optional[int] = None,
     iterator: bool = False,
-) -> Union["DataFrame", ReaderBase]:
+) -> Union[DataFrame, ReaderBase]:
     """
     Read SAS files stored as either XPORT or SAS7BDAT format files.
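Note: `@overload` stubs are pure annotation plus `...` bodies, so nothing in them is evaluated at runtime and the quotes can go. A heavily condensed sketch (trimmed parameters, stand-in `ReaderBase`, not the real signatures):

    from __future__ import annotations

    from typing import TYPE_CHECKING, Optional, Union, overload

    if TYPE_CHECKING:
        from pandas import DataFrame

    class ReaderBase:
        pass

    @overload
    def read_sas(path: str, chunksize: None = ...) -> DataFrame:
        ...

    @overload
    def read_sas(path: str, chunksize: int = ...) -> ReaderBase:
        ...

    def read_sas(path: str, chunksize: Optional[int] = None) -> Union[DataFrame, ReaderBase]:
        # With a chunksize, hand back an iterator; otherwise read eagerly.
        ...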
diff --git a/pandas/plotting/_matplotlib/__init__.py b/pandas/plotting/_matplotlib/__init__.py
index 33011e6a66cac..e212127549355 100644
--- a/pandas/plotting/_matplotlib/__init__.py
+++ b/pandas/plotting/_matplotlib/__init__.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from typing import TYPE_CHECKING, Dict, Type
 
 from pandas.plotting._matplotlib.boxplot import (
@@ -31,7 +33,7 @@
 if TYPE_CHECKING:
     from pandas.plotting._matplotlib.core import MPLPlot
 
-PLOT_CLASSES: Dict[str, Type["MPLPlot"]] = {
+PLOT_CLASSES: Dict[str, Type[MPLPlot]] = {
     "line": LinePlot,
     "bar": BarPlot,
     "barh": BarhPlot,
diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py
index 8e81751d88fa1..068fd571753a2 100644
--- a/pandas/plotting/_matplotlib/misc.py
+++ b/pandas/plotting/_matplotlib/misc.py
@@ -126,7 +126,7 @@ def _get_marker_compat(marker):
 
 def radviz(
     frame: DataFrame,
     class_column,
-    ax: Optional["Axes"] = None,
+    ax: Optional[Axes] = None,
     color=None,
     colormap=None,
     **kwds,
@@ -214,7 +214,7 @@ def normalize(series):
 
 def andrews_curves(
     frame: DataFrame,
     class_column,
-    ax: Optional["Axes"] = None,
+    ax: Optional[Axes] = None,
     samples: int = 200,
     color=None,
     colormap=None,
@@ -280,7 +280,7 @@ def f(t):
 
 def bootstrap_plot(
     series: "Series",
-    fig: Optional["Figure"] = None,
+    fig: Optional[Figure] = None,
     size: int = 50,
     samples: int = 500,
     **kwds,
@@ -337,7 +337,7 @@ def parallel_coordinates(
     frame: DataFrame,
     class_column,
     cols=None,
-    ax: Optional["Axes"] = None,
+    ax: Optional[Axes] = None,
     color=None,
     use_columns=False,
     xticks=None,
@@ -413,9 +413,7 @@ def parallel_coordinates(
     return ax
 
 
-def lag_plot(
-    series: "Series", lag: int = 1, ax: Optional["Axes"] = None, **kwds
-) -> Axes:
+def lag_plot(series: Series, lag: int = 1, ax: Optional[Axes] = None, **kwds) -> Axes:
     # workaround because `c='b'` is hardcoded in matplotlib's scatter method
     import matplotlib.pyplot as plt
 
@@ -432,7 +430,7 @@ def lag_plot(
     return ax
 
 
-def autocorrelation_plot(series: "Series", ax: Optional["Axes"] = None, **kwds) -> Axes:
+def autocorrelation_plot(series: Series, ax: Optional[Axes] = None, **kwds) -> Axes:
     import matplotlib.pyplot as plt
 
     n = len(series)
diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py
index c88c310d512be..2c9aadd9573cf 100644
--- a/pandas/plotting/_matplotlib/style.py
+++ b/pandas/plotting/_matplotlib/style.py
@@ -31,7 +31,7 @@
 
 def get_standard_colors(
     num_colors: int,
-    colormap: Optional["Colormap"] = None,
+    colormap: Optional[Colormap] = None,
     color_type: str = "default",
     color: Optional[Union[Dict[str, Color], Color, Collection[Color]]] = None,
 ):
@@ -83,7 +83,7 @@ def get_standard_colors(
 
 def _derive_colors(
     *,
     color: Optional[Union[Color, Collection[Color]]],
-    colormap: Optional[Union[str, "Colormap"]],
+    colormap: Optional[Union[str, Colormap]],
     color_type: str,
     num_colors: int,
 ) -> List[Color]:
@@ -142,7 +142,7 @@ def _cycle_colors(colors: List[Color], num_colors: int) -> Iterator[Color]:
 
 def _get_colors_from_colormap(
-    colormap: Union[str, "Colormap"],
+    colormap: Union[str, Colormap],
     num_colors: int,
 ) -> List[Color]:
     """Get colors from colormap."""
@@ -150,7 +150,7 @@ def _get_colors_from_colormap(
     return [colormap(num) for num in np.linspace(0, 1, num=num_colors)]
 
 
-def _get_cmap_instance(colormap: Union[str, "Colormap"]) -> Colormap:
+def _get_cmap_instance(colormap: Union[str, Colormap]) -> Colormap:
     """Get instance of matplotlib colormap."""
     if isinstance(colormap, str):
         cmap = colormap
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 7440daeb0a632..978b9721c992c 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -351,7 +351,7 @@ def _has_externally_shared_axis(ax1: "matplotlib.axes", compare_axis: "str") ->
 
 def handle_shared_axes(
-    axarr: Iterable["Axes"],
+    axarr: Iterable[Axes],
     nplots: int,
     naxes: int,
     nrows: int,
@@ -404,7 +404,7 @@ def handle_shared_axes(
         _remove_labels_from_axis(ax.yaxis)
 
 
-def flatten_axes(axes: Union["Axes", Sequence["Axes"]]) -> np.ndarray:
+def flatten_axes(axes: Union[Axes, Sequence[Axes]]) -> np.ndarray:
     if not is_list_like(axes):
         return np.array([axes])
     elif isinstance(axes, (np.ndarray, ABCIndex)):
@@ -413,7 +413,7 @@ def flatten_axes(axes: Union["Axes", Sequence["Axes"]]) -> np.ndarray:
 
 def set_ticks_props(
-    axes: Union["Axes", Sequence["Axes"]],
+    axes: Union[Axes, Sequence[Axes]],
     xlabelsize=None,
     xrot=None,
     ylabelsize=None,
@@ -433,7 +433,7 @@ def set_ticks_props(
     return axes
 
 
-def get_all_lines(ax: "Axes") -> List["Line2D"]:
+def get_all_lines(ax: Axes) -> List[Line2D]:
     lines = ax.get_lines()
 
     if hasattr(ax, "right_ax"):
@@ -445,7 +445,7 @@ def get_all_lines(ax: "Axes") -> List["Line2D"]:
     return lines
 
 
-def get_xlim(lines: Iterable["Line2D"]) -> Tuple[float, float]:
+def get_xlim(lines: Iterable[Line2D]) -> Tuple[float, float]:
     left, right = np.inf, -np.inf
     for line in lines:
         x = line.get_xdata(orig=False)
diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py
index 65c5102e22997..3000e8cdeac1f 100644
--- a/pandas/tests/extension/arrow/arrays.py
+++ b/pandas/tests/extension/arrow/arrays.py
@@ -6,6 +6,8 @@
 multiple dtypes. Not all methods are implemented yet, and the current
 implementation is not efficient.
 """
+from __future__ import annotations
+
 import copy
 import itertools
 import operator
@@ -33,7 +35,7 @@ class ArrowBoolDtype(ExtensionDtype):
     na_value = pa.NULL
 
     @classmethod
-    def construct_array_type(cls) -> Type["ArrowBoolArray"]:
+    def construct_array_type(cls) -> Type[ArrowBoolArray]:
         """
         Return the array type associated with this dtype.
 
@@ -57,7 +59,7 @@ class ArrowStringDtype(ExtensionDtype):
     na_value = pa.NULL
 
     @classmethod
-    def construct_array_type(cls) -> Type["ArrowStringArray"]:
+    def construct_array_type(cls) -> Type[ArrowStringArray]:
         """
         Return the array type associated with this dtype.
diff --git a/pandas/tests/extension/arrow/test_timestamp.py b/pandas/tests/extension/arrow/test_timestamp.py
index 29bd3713e9552..bd661ad20bb02 100644
--- a/pandas/tests/extension/arrow/test_timestamp.py
+++ b/pandas/tests/extension/arrow/test_timestamp.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import datetime
 from typing import Type
 
@@ -22,7 +24,7 @@ class ArrowTimestampUSDtype(ExtensionDtype):
     na_value = pa.NULL
 
     @classmethod
-    def construct_array_type(cls) -> Type["ArrowTimestampUSArray"]:
+    def construct_array_type(cls) -> Type[ArrowTimestampUSArray]:
         """
         Return the array type associated with this dtype.
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index a713550dafa5c..3abd4fc8c6160 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import decimal
 import numbers
 import random
@@ -30,7 +32,7 @@ def __repr__(self) -> str:
         return f"DecimalDtype(context={self.context})"
 
     @classmethod
-    def construct_array_type(cls) -> Type["DecimalArray"]:
+    def construct_array_type(cls) -> Type[DecimalArray]:
         """
         Return the array type associated with this dtype.
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index e3cdeb9c1951f..7d70903dc8c32 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -11,6 +11,8 @@
 in that case. We *want* the dictionaries to be treated as scalars, so we
 hack around pandas by using UserDicts.
 """
+from __future__ import annotations
+
 from collections import UserDict, abc
 import itertools
 import numbers
@@ -33,7 +35,7 @@ class JSONDtype(ExtensionDtype):
     na_value: Mapping[str, Any] = UserDict()
 
     @classmethod
-    def construct_array_type(cls) -> Type["JSONArray"]:
+    def construct_array_type(cls) -> Type[JSONArray]:
         """
         Return the array type associated with this dtype.
diff --git a/pandas/tests/extension/list/array.py b/pandas/tests/extension/list/array.py
index d86f90e58d897..5c5a6bb71240b 100644
--- a/pandas/tests/extension/list/array.py
+++ b/pandas/tests/extension/list/array.py
@@ -3,6 +3,8 @@
 The ListArray stores an ndarray of lists.
 """
+from __future__ import annotations
+
 import numbers
 import random
 import string
@@ -22,7 +24,7 @@ class ListDtype(ExtensionDtype):
     na_value = np.nan
 
     @classmethod
-    def construct_array_type(cls) -> Type["ListArray"]:
+    def construct_array_type(cls) -> Type[ListArray]:
         """
         Return the array type associated with this dtype.
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index d31f57426a721..29c02916ec6e9 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -5,6 +5,8 @@
 ``pytestmark = pytest.mark.slow`` at the module level.
 """
 
+from __future__ import annotations
+
 import os
 from typing import TYPE_CHECKING, Sequence, Union
 import warnings
@@ -184,7 +186,7 @@ def _check_visible(self, collections, visible=True):
             assert patch.get_visible() == visible
 
     def _check_patches_all_filled(
-        self, axes: Union["Axes", Sequence["Axes"]], filled: bool = True
+        self, axes: Union[Axes, Sequence[Axes]], filled: bool = True
     ) -> None:
         """
         Check for each artist whether it is filled or not