diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 58920420778b2..77582c46977c1 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from abc import (
     ABC,
     abstractmethod,
@@ -10,10 +12,6 @@
     Any,
     Callable,
     Mapping,
-    Optional,
-    Tuple,
-    Type,
-    Union,
 )
 
 import numpy as np
@@ -78,12 +76,12 @@
 def to_json(
     path_or_buf,
     obj: NDFrame,
-    orient: Optional[str] = None,
+    orient: str | None = None,
     date_format: str = "epoch",
     double_precision: int = 10,
     force_ascii: bool = True,
     date_unit: str = "ms",
-    default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
+    default_handler: Callable[[Any], JSONSerializable] | None = None,
     lines: bool = False,
     compression: CompressionOptions = "infer",
     index: bool = True,
@@ -102,7 +100,7 @@ def to_json(
     if orient == "table" and isinstance(obj, Series):
         obj = obj.to_frame(name=obj.name or "values")
 
-    writer: Type[Writer]
+    writer: type[Writer]
     if orient == "table" and isinstance(obj, DataFrame):
         writer = JSONTableWriter
     elif isinstance(obj, Series):
@@ -143,13 +141,13 @@ class Writer(ABC):
     def __init__(
         self,
         obj,
-        orient: Optional[str],
+        orient: str | None,
         date_format: str,
         double_precision: int,
         ensure_ascii: bool,
         date_unit: str,
         index: bool,
-        default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
+        default_handler: Callable[[Any], JSONSerializable] | None = None,
         indent: int = 0,
     ):
         self.obj = obj
@@ -187,7 +185,7 @@ def write(self):
 
     @property
     @abstractmethod
-    def obj_to_write(self) -> Union[NDFrame, Mapping[IndexLabel, Any]]:
+    def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
         """Object to write in JSON format."""
         pass
 
@@ -196,7 +194,7 @@ class SeriesWriter(Writer):
     _default_orient = "index"
 
     @property
-    def obj_to_write(self) -> Union[NDFrame, Mapping[IndexLabel, Any]]:
+    def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
         if not self.index and self.orient == "split":
             return {"name": self.obj.name, "data": self.obj.values}
         else:
@@ -211,7 +209,7 @@ class FrameWriter(Writer):
     _default_orient = "columns"
 
     @property
-    def obj_to_write(self) -> Union[NDFrame, Mapping[IndexLabel, Any]]:
+    def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
         if not self.index and self.orient == "split":
             obj_to_write = self.obj.to_dict(orient="split")
             del obj_to_write["index"]
@@ -243,13 +241,13 @@ class JSONTableWriter(FrameWriter):
     def __init__(
         self,
         obj,
-        orient: Optional[str],
+        orient: str | None,
         date_format: str,
         double_precision: int,
         ensure_ascii: bool,
         date_unit: str,
         index: bool,
-        default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
+        default_handler: Callable[[Any], JSONSerializable] | None = None,
         indent: int = 0,
     ):
         """
@@ -313,7 +311,7 @@ def __init__(
         self.index = index
 
     @property
-    def obj_to_write(self) -> Union[NDFrame, Mapping[IndexLabel, Any]]:
+    def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
         return {"schema": self.schema, "data": self.obj}
 
 
@@ -326,7 +324,7 @@ def read_json(
     path_or_buf=None,
     orient=None,
     typ="frame",
-    dtype: Optional[DtypeArg] = None,
+    dtype: DtypeArg | None = None,
     convert_axes=None,
     convert_dates=True,
     keep_default_dates: bool = True,
@@ -334,11 +332,11 @@ def read_json(
     precise_float: bool = False,
     date_unit=None,
     encoding=None,
-    encoding_errors: Optional[str] = "strict",
+    encoding_errors: str | None = "strict",
     lines: bool = False,
-    chunksize: Optional[int] = None,
+    chunksize: int | None = None,
     compression: CompressionOptions = "infer",
-    nrows: Optional[int] = None,
+    nrows: int | None = None,
     storage_options: StorageOptions = None,
 ):
     """
@@ -639,11 +637,11 @@ def __init__(
         date_unit,
         encoding,
         lines: bool,
-        chunksize: Optional[int],
+        chunksize: int | None,
         compression: CompressionOptions,
-        nrows: Optional[int],
+        nrows: int | None,
         storage_options: StorageOptions = None,
-        encoding_errors: Optional[str] = "strict",
+        encoding_errors: str | None = "strict",
     ):
 
         self.orient = orient
@@ -663,7 +661,7 @@ def __init__(
         self.nrows_seen = 0
         self.nrows = nrows
         self.encoding_errors = encoding_errors
-        self.handles: Optional[IOHandles] = None
+        self.handles: IOHandles | None = None
 
         if self.chunksize is not None:
             self.chunksize = validate_integer("chunksize", self.chunksize, 1)
@@ -816,7 +814,7 @@ def __exit__(self, exc_type, exc_value, traceback):
 
 
 class Parser:
-    _split_keys: Tuple[str, ...]
+    _split_keys: tuple[str, ...]
     _default_orient: str
 
     _STAMP_UNITS = ("s", "ms", "us", "ns")
@@ -831,7 +829,7 @@ def __init__(
         self,
         json,
         orient,
-        dtype: Optional[DtypeArg] = None,
+        dtype: DtypeArg | None = None,
         convert_axes=True,
         convert_dates=True,
         keep_default_dates=False,
@@ -865,7 +863,7 @@ def __init__(
         self.convert_dates = convert_dates
         self.date_unit = date_unit
         self.keep_default_dates = keep_default_dates
-        self.obj: Optional[FrameOrSeriesUnion] = None
+        self.obj: FrameOrSeriesUnion | None = None
 
     def check_keys_split(self, decoded):
         """
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 7c6a718b34e89..7e3bf0b224e0e 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import contextlib
 import datetime as pydt
 from datetime import (
@@ -6,13 +8,7 @@
     tzinfo,
 )
 import functools
-from typing import (
-    Any,
-    Dict,
-    List,
-    Optional,
-    Tuple,
-)
+from typing import Any
 
 from dateutil.relativedelta import relativedelta
 import matplotlib.dates as dates
@@ -169,7 +165,7 @@ def convert(value, unit, axis):
         return value
 
     @staticmethod
-    def axisinfo(unit, axis) -> Optional[units.AxisInfo]:
+    def axisinfo(unit, axis) -> units.AxisInfo | None:
         if unit != "time":
             return None
 
@@ -319,7 +315,7 @@ def try_parse(values):
         return values
 
     @staticmethod
-    def axisinfo(unit: Optional[tzinfo], axis) -> units.AxisInfo:
+    def axisinfo(unit: tzinfo | None, axis) -> units.AxisInfo:
         """
         Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
 
@@ -447,7 +443,7 @@ def autoscale(self):
         return self.nonsingular(vmin, vmax)
 
 
-def _from_ordinal(x, tz: Optional[tzinfo] = None) -> datetime:
+def _from_ordinal(x, tz: tzinfo | None = None) -> datetime:
     ix = int(x)
     dt = datetime.fromordinal(ix)
     remainder = float(x) - ix
@@ -476,7 +472,7 @@ def _from_ordinal(x, tz: Optional[tzinfo] = None) -> datetime:
 # -------------------------------------------------------------------------
 
 
-def _get_default_annual_spacing(nyears) -> Tuple[int, int]:
+def _get_default_annual_spacing(nyears) -> tuple[int, int]:
     """
     Returns a default spacing between consecutive ticks for annual data.
     """
@@ -1027,8 +1023,8 @@ def __init__(
         freq = to_offset(freq)
         self.format = None
         self.freq = freq
-        self.locs: List[Any] = []  # unused, for matplotlib compat
-        self.formatdict: Optional[Dict[Any, Any]] = None
+        self.locs: list[Any] = []  # unused, for matplotlib compat
+        self.formatdict: dict[Any, Any] | None = None
         self.isminor = minor_locator
         self.isdynamic = dynamic_mode
         self.offset = 0
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
index 2a27f670fa046..ca22973d0b4d3 100644
--- a/pandas/tests/extension/base/ops.py
+++ b/pandas/tests/extension/base/ops.py
@@ -1,7 +1,4 @@
-from typing import (
-    Optional,
-    Type,
-)
+from __future__ import annotations
 
 import pytest
 
@@ -67,10 +64,10 @@ class BaseArithmeticOpsTests(BaseOpsUtil):
     * divmod_exc = TypeError
     """
 
-    series_scalar_exc: Optional[Type[TypeError]] = TypeError
-    frame_scalar_exc: Optional[Type[TypeError]] = TypeError
-    series_array_exc: Optional[Type[TypeError]] = TypeError
-    divmod_exc: Optional[Type[TypeError]] = TypeError
+    series_scalar_exc: type[TypeError] | None = TypeError
+    frame_scalar_exc: type[TypeError] | None = TypeError
+    series_array_exc: type[TypeError] | None = TypeError
+    divmod_exc: type[TypeError] | None = TypeError
 
     def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
         # series & scalar
diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py
index 1eb52ab78e1a0..e11746c118ff7 100644
--- a/pandas/tests/io/parser/conftest.py
+++ b/pandas/tests/io/parser/conftest.py
@@ -1,8 +1,6 @@
+from __future__ import annotations
+
 import os
-from typing import (
-    List,
-    Optional,
-)
 
 import pytest
 
@@ -13,9 +11,9 @@
 
 
 class BaseParser:
-    engine: Optional[str] = None
+    engine: str | None = None
     low_memory = True
-    float_precision_choices: List[Optional[str]] = []
+    float_precision_choices: list[str | None] = []
 
     def update_kwargs(self, kwargs):
         kwargs = kwargs.copy()
diff --git a/pandas/tests/tseries/offsets/common.py b/pandas/tests/tseries/offsets/common.py
index db63785988977..0227a07877db0 100644
--- a/pandas/tests/tseries/offsets/common.py
+++ b/pandas/tests/tseries/offsets/common.py
@@ -1,11 +1,9 @@
 """
 Assertion helpers and base class for offsets tests
 """
+from __future__ import annotations
+
 from datetime import datetime
-from typing import (
-    Optional,
-    Type,
-)
 
 from dateutil.tz.tz import tzlocal
 import pytest
@@ -61,7 +59,7 @@ class WeekDay:
 
 
 class Base:
-    _offset: Optional[Type[DateOffset]] = None
+    _offset: type[DateOffset] | None = None
     d = Timestamp(datetime(2008, 1, 2))
 
     timezones = [
diff --git a/pandas/util/version/__init__.py b/pandas/util/version/__init__.py
index 5ca3abb916ce0..3d59cef4d4f77 100644
--- a/pandas/util/version/__init__.py
+++ b/pandas/util/version/__init__.py
@@ -6,6 +6,7 @@
 # This file is dual licensed under the terms of the Apache License, Version
 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
 # for complete details.
+from __future__ import annotations
 
 import collections
 import itertools
@@ -13,8 +14,6 @@
 from typing import (
     Callable,
     Iterator,
-    List,
-    Optional,
     SupportsInt,
     Tuple,
     Union,
@@ -49,7 +48,7 @@ def __gt__(self, other: object) -> bool:
     def __ge__(self, other: object) -> bool:
         return True
 
-    def __neg__(self: object) -> "NegativeInfinityType":
+    def __neg__(self: object) -> NegativeInfinityType:
         return NegativeInfinity
 
 
@@ -115,7 +114,7 @@ def __neg__(self: object) -> InfinityType:
 )
 
 
-def parse(version: str) -> Union["LegacyVersion", "Version"]:
+def parse(version: str) -> LegacyVersion | Version:
     """
     Parse the given version string and return either a :class:`Version` object
     or a :class:`LegacyVersion` object depending on if the given version is
@@ -134,7 +133,7 @@ class InvalidVersion(ValueError):
 
 
 class _BaseVersion:
-    _key: Union[CmpKey, LegacyCmpKey]
+    _key: CmpKey | LegacyCmpKey
 
     def __hash__(self) -> int:
         return hash(self._key)
@@ -142,13 +141,13 @@ def __hash__(self) -> int:
     # Please keep the duplicated `isinstance` check
     # in the six comparisons hereunder
     # unless you find a way to avoid adding overhead function calls.
-    def __lt__(self, other: "_BaseVersion") -> bool:
+    def __lt__(self, other: _BaseVersion) -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
 
         return self._key < other._key
 
-    def __le__(self, other: "_BaseVersion") -> bool:
+    def __le__(self, other: _BaseVersion) -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
 
@@ -160,13 +159,13 @@ def __eq__(self, other: object) -> bool:
 
         return self._key == other._key
 
-    def __ge__(self, other: "_BaseVersion") -> bool:
+    def __ge__(self, other: _BaseVersion) -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
 
         return self._key >= other._key
 
-    def __gt__(self, other: "_BaseVersion") -> bool:
+    def __gt__(self, other: _BaseVersion) -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
 
@@ -279,7 +278,7 @@ def _legacy_cmpkey(version: str) -> LegacyCmpKey:
 
     # This scheme is taken from pkg_resources.parse_version setuptools prior to
    # it's adoption of the packaging library.
-    parts: List[str] = []
+    parts: list[str] = []
     for part in _parse_version_parts(version.lower()):
         if part.startswith("*"):
             # remove "-" before a prerelease tag
@@ -400,25 +399,25 @@ def epoch(self) -> int:
         return _epoch
 
     @property
-    def release(self) -> Tuple[int, ...]:
-        _release: Tuple[int, ...] = self._version.release
+    def release(self) -> tuple[int, ...]:
+        _release: tuple[int, ...] = self._version.release
         return _release
 
     @property
-    def pre(self) -> Optional[Tuple[str, int]]:
-        _pre: Optional[Tuple[str, int]] = self._version.pre
+    def pre(self) -> tuple[str, int] | None:
+        _pre: tuple[str, int] | None = self._version.pre
         return _pre
 
     @property
-    def post(self) -> Optional[int]:
+    def post(self) -> int | None:
         return self._version.post[1] if self._version.post else None
 
     @property
-    def dev(self) -> Optional[int]:
+    def dev(self) -> int | None:
         return self._version.dev[1] if self._version.dev else None
 
     @property
-    def local(self) -> Optional[str]:
+    def local(self) -> str | None:
         if self._version.local:
             return ".".join(str(x) for x in self._version.local)
         else:
@@ -467,8 +466,8 @@ def micro(self) -> int:
 
 
 def _parse_letter_version(
-    letter: str, number: Union[str, bytes, SupportsInt]
-) -> Optional[Tuple[str, int]]:
+    letter: str, number: str | bytes | SupportsInt
+) -> tuple[str, int] | None:
 
     if letter:
         # We consider there to be an implicit 0 in a pre-release if there is
@@ -505,7 +504,7 @@ def _parse_letter_version(
 _local_version_separators = re.compile(r"[\._-]")
 
 
-def _parse_local_version(local: str) -> Optional[LocalType]:
+def _parse_local_version(local: str) -> LocalType | None:
     """
     Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
     """
@@ -519,11 +518,11 @@ def _parse_local_version(local: str) -> Optional[LocalType]:
 
 def _cmpkey(
     epoch: int,
-    release: Tuple[int, ...],
-    pre: Optional[Tuple[str, int]],
-    post: Optional[Tuple[str, int]],
-    dev: Optional[Tuple[str, int]],
-    local: Optional[Tuple[SubLocalType]],
+    release: tuple[int, ...],
+    pre: tuple[str, int] | None,
+    post: tuple[str, int] | None,
+    dev: tuple[str, int] | None,
+    local: tuple[SubLocalType] | None,
 ) -> CmpKey:
 
     # When we compare a release version, we want to compare it with all of the
diff --git a/scripts/no_bool_in_generic.py b/scripts/no_bool_in_generic.py
index f80eff56b2729..f63ae4ae1659c 100644
--- a/scripts/no_bool_in_generic.py
+++ b/scripts/no_bool_in_generic.py
@@ -10,23 +10,18 @@
 The function `visit` is adapted from a function by the same name in pyupgrade:
 https://github.com/asottile/pyupgrade/blob/5495a248f2165941c5d3b82ac3226ba7ad1fa59d/pyupgrade/_data.py#L70-L113
 """
+from __future__ import annotations
 
 import argparse
 import ast
 import collections
-from typing import (
-    Dict,
-    List,
-    Optional,
-    Sequence,
-    Tuple,
-)
+from typing import Sequence
 
 
-def visit(tree: ast.Module) -> Dict[int, List[int]]:
+def visit(tree: ast.Module) -> dict[int, list[int]]:
     "Step through tree, recording when nodes are in annotations."
     in_annotation = False
-    nodes: List[Tuple[bool, ast.AST]] = [(in_annotation, tree)]
+    nodes: list[tuple[bool, ast.AST]] = [(in_annotation, tree)]
     to_replace = collections.defaultdict(list)
 
     while nodes:
@@ -62,7 +57,7 @@ def replace_bool_with_bool_t(to_replace, content: str) -> str:
     return "\n".join(new_lines)
 
 
-def check_for_bool_in_generic(content: str) -> Tuple[bool, str]:
+def check_for_bool_in_generic(content: str) -> tuple[bool, str]:
     tree = ast.parse(content)
     to_replace = visit(tree)
 
@@ -74,7 +69,7 @@ def check_for_bool_in_generic(content: str) -> Tuple[bool, str]:
     return mutated, replace_bool_with_bool_t(to_replace, content)
 
 
-def main(argv: Optional[Sequence[str]] = None) -> None:
+def main(argv: Sequence[str] | None = None) -> None:
     parser = argparse.ArgumentParser()
     parser.add_argument("paths", nargs="*")
     args = parser.parse_args(argv)
diff --git a/scripts/use_pd_array_in_core.py b/scripts/use_pd_array_in_core.py
index 531084683bdb1..61ba070e52f1b 100644
--- a/scripts/use_pd_array_in_core.py
+++ b/scripts/use_pd_array_in_core.py
@@ -9,13 +9,12 @@
 
 """
 
+from __future__ import annotations
+
 import argparse
 import ast
 import sys
-from typing import (
-    Optional,
-    Sequence,
-)
+from typing import Sequence
 
 ERROR_MESSAGE = (
     "{path}:{lineno}:{col_offset}: "
@@ -62,7 +61,7 @@ def use_pd_array(content: str, path: str) -> None:
     visitor.visit(tree)
 
 
-def main(argv: Optional[Sequence[str]] = None) -> None:
+def main(argv: Sequence[str] | None = None) -> None:
     parser = argparse.ArgumentParser()
     parser.add_argument("paths", nargs="*")
     args = parser.parse_args(argv)
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index d0f32bb554cf9..b77210e3d2bab 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -13,6 +13,8 @@
     $ ./validate_docstrings.py
     $ ./validate_docstrings.py pandas.DataFrame.head
 """
+from __future__ import annotations
+
 import argparse
 import doctest
 import glob
@@ -22,10 +24,6 @@
 import subprocess
 import sys
 import tempfile
-from typing import (
-    List,
-    Optional,
-)
 
 try:
     from io import StringIO
@@ -315,7 +313,7 @@ def validate_all(prefix, ignore_deprecated=False):
 
 def print_validate_all_results(
     prefix: str,
-    errors: Optional[List[str]],
+    errors: list[str] | None,
     output_format: str,
     ignore_deprecated: bool,
 ):
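Note (not part of the patch): the snippet below is a minimal, hypothetical sketch of the annotation style this diff adopts throughout. With `from __future__ import annotations` (PEP 563), annotations are stored as strings instead of being evaluated at definition time, so the PEP 604 `X | None` and PEP 585 `list[int]` spellings are valid in annotations on Python 3.7+; only runtime expressions such as `isinstance(x, int | None)` still require Python 3.10. The function name `head` is illustrative, not from pandas.

from __future__ import annotations


def head(values: list[int], n: int | None = None) -> list[int]:
    # "list[int]" and "int | None" are never evaluated at runtime here;
    # the future import keeps them as plain strings, so this runs on 3.7+.
    return values[: n if n is not None else 5]


print(head.__annotations__)
# {'values': 'list[int]', 'n': 'int | None', 'return': 'list[int]'}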