STYLE add future annotations where possible #47769

Merged
merged 1 commit on Jul 18, 2022

18 changes: 12 additions & 6 deletions .pre-commit-config.yaml
@@ -94,8 +94,6 @@ repos:
stages: [manual]
additional_dependencies: &pyright_dependencies
- [email protected]
- repo: local
hooks:
Comment on lines -97 to -98 (Member Author):
Not relevant to this PR, but there's no need to keep repeating `repo: local`, so I'm removing the extra ones while I'm here.
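For readers unfamiliar with the config format, a minimal sketch (hook ids and entries are made up, not part of this PR) showing that pre-commit accepts any number of hooks under a single `repo: local` entry, which is why the repeated headers are redundant:

```yaml
repos:
  - repo: local
    hooks:
      - id: first-check             # hypothetical hook
        name: first check
        entry: ./scripts/first_check.sh
        language: script
      - id: second-check            # hypothetical hook, sharing the same repo: local block
        name: second check
        entry: ./scripts/second_check.sh
        language: script
```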

- id: pyright_reportGeneralTypeIssues
name: pyright reportGeneralTypeIssues
entry: pyright --skipunannotated -p pyright_reportGeneralTypeIssues.json
@@ -105,8 +103,6 @@ repos:
types: [python]
stages: [manual]
additional_dependencies: *pyright_dependencies
- repo: local
hooks:
- id: mypy
name: mypy
entry: mypy
@@ -115,8 +111,6 @@
pass_filenames: false
types: [python]
stages: [manual]
- repo: local
hooks:
- id: flake8-rst
name: flake8-rst
description: Run flake8 on code snippets in docstrings or RST files
@@ -237,3 +231,15 @@ repos:
additional_dependencies:
- flake8==4.0.1
- flake8-pyi==22.5.1
- id: future-annotations
name: import annotations from __future__
entry: 'from __future__ import annotations'
language: pygrep
args: [--negate]
Comment (Member Author):
`--negate` checks that the pattern in `entry` is always present (as opposed to checking that it is absent); see the contrast sketch after this hunk.

files: ^pandas/
types: [python]
exclude: |
(?x)
/(__init__\.py)|(api\.py)|(_version\.py)|(testing\.py)|(conftest\.py)$
|/tests/
|/_testing/
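To make the `--negate` behaviour mentioned in the comment above concrete, a hedged sketch contrasting the two pygrep modes (the `ban-print` hook is hypothetical; the `future-annotations` hook mirrors the one added in this diff):

```yaml
hooks:
  # default pygrep: the hook fails when the pattern in entry IS found (use it to ban a pattern)
  - id: ban-print                   # hypothetical hook
    name: ban print()
    entry: '\bprint\('
    language: pygrep
    types: [python]
  # with --negate: the hook fails when the pattern is NOT found (use it to require a line in every file)
  - id: future-annotations
    name: import annotations from __future__
    entry: 'from __future__ import annotations'
    language: pygrep
    args: [--negate]
    types: [python]
```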
2 changes: 2 additions & 0 deletions pandas/_config/dates.py
@@ -1,6 +1,8 @@
"""
config for datetime formatting
"""
from __future__ import annotations

from pandas._config import config as cf

pc_date_dayfirst_doc = """
2 changes: 2 additions & 0 deletions pandas/compat/chainmap.py
@@ -1,3 +1,5 @@
from __future__ import annotations

from typing import (
ChainMap,
TypeVar,
2 changes: 2 additions & 0 deletions pandas/compat/pyarrow.py
@@ -1,5 +1,7 @@
""" support pyarrow compatibility across versions """

from __future__ import annotations

from pandas.util.version import Version

try:
2 changes: 2 additions & 0 deletions pandas/core/_numba/kernels/shared.py
@@ -1,3 +1,5 @@
from __future__ import annotations

import numba
import numpy as np

2 changes: 2 additions & 0 deletions pandas/core/array_algos/transforms.py
@@ -2,6 +2,8 @@
transforms.py is for shape-preserving functions.
"""

from __future__ import annotations

import numpy as np


2 changes: 2 additions & 0 deletions pandas/core/arraylike.py
@@ -4,6 +4,8 @@
Index
ExtensionArray
"""
from __future__ import annotations

import operator
from typing import Any
import warnings
2 changes: 2 additions & 0 deletions pandas/core/computation/check.py
@@ -1,3 +1,5 @@
from __future__ import annotations

from pandas.compat._optional import import_optional_dependency

ne = import_optional_dependency("numexpr", errors="warn")
2 changes: 2 additions & 0 deletions pandas/core/computation/common.py
@@ -1,3 +1,5 @@
from __future__ import annotations

from functools import reduce

import numpy as np
2 changes: 2 additions & 0 deletions pandas/core/config_init.py
@@ -9,6 +9,8 @@
module is imported, register them here rather than in the module.

"""
from __future__ import annotations

import os
from typing import Callable
import warnings
2 changes: 2 additions & 0 deletions pandas/core/dtypes/inference.py
@@ -1,5 +1,7 @@
""" basic inference routines """

from __future__ import annotations

from collections import abc
from numbers import Number
import re
7 changes: 2 additions & 5 deletions pandas/core/exchange/buffer.py
@@ -1,7 +1,4 @@
from typing import (
Optional,
Tuple,
)
from __future__ import annotations

import numpy as np
from packaging import version
@@ -60,7 +57,7 @@ def __dlpack__(self):
return self._x.__dlpack__()
raise NotImplementedError("__dlpack__")

def __dlpack_device__(self) -> Tuple[DlpackDeviceType, Optional[int]]:
def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
"""
Device type and device ID for where the data in the buffer resides.
"""
35 changes: 17 additions & 18 deletions pandas/core/exchange/dataframe_protocol.py
@@ -2,18 +2,17 @@
A verbatim copy (vendored) of the spec from https://github.com/data-apis/dataframe-api
"""

from __future__ import annotations

from abc import (
ABC,
abstractmethod,
)
import enum
from typing import (
Any,
Dict,
Iterable,
Optional,
Sequence,
Tuple,
TypedDict,
)

@@ -90,18 +89,18 @@ class ColumnNullType(enum.IntEnum):
class ColumnBuffers(TypedDict):
# first element is a buffer containing the column data;
# second element is the data buffer's associated dtype
data: Tuple["Buffer", Any]
data: tuple[Buffer, Any]

# first element is a buffer containing mask values indicating missing data;
# second element is the mask value buffer's associated dtype.
# None if the null representation is not a bit or byte mask
validity: Optional[Tuple["Buffer", Any]]
validity: tuple[Buffer, Any] | None

# first element is a buffer containing the offset values for
# variable-size binary data (e.g., variable-length strings);
# second element is the offsets buffer's associated dtype.
# None if the data buffer does not have an associated offsets buffer
offsets: Optional[Tuple["Buffer", Any]]
offsets: tuple[Buffer, Any] | None


class CategoricalDescription(TypedDict):
@@ -111,7 +110,7 @@ class CategoricalDescription(TypedDict):
is_dictionary: bool
# Python-level only (e.g. ``{int: str}``).
# None if not a dictionary-style categorical.
mapping: Optional[dict]
mapping: dict | None


class Buffer(ABC):
@@ -161,7 +160,7 @@ def __dlpack__(self):
raise NotImplementedError("__dlpack__")

@abstractmethod
def __dlpack_device__(self) -> Tuple[DlpackDeviceType, Optional[int]]:
def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
"""
Device type and device ID for where the data in the buffer resides.
Uses device type codes matching DLPack.
@@ -239,7 +238,7 @@ def offset(self) -> int:

@property
@abstractmethod
def dtype(self) -> Tuple[DtypeKind, int, str, str]:
def dtype(self) -> tuple[DtypeKind, int, str, str]:
"""
Dtype description as a tuple ``(kind, bit-width, format string, endianness)``.

@@ -293,7 +292,7 @@ def describe_categorical(self) -> CategoricalDescription:

@property
@abstractmethod
def describe_null(self) -> Tuple[ColumnNullType, Any]:
def describe_null(self) -> tuple[ColumnNullType, Any]:
"""
Return the missing value (or "null") representation the column dtype
uses, as a tuple ``(kind, value)``.
@@ -306,7 +305,7 @@ def describe_null(self) -> Tuple[ColumnNullType, Any]:

@property
@abstractmethod
def null_count(self) -> Optional[int]:
def null_count(self) -> int | None:
"""
Number of null elements, if known.

@@ -316,7 +315,7 @@ def null_count(self) -> Optional[int]:

@property
@abstractmethod
def metadata(self) -> Dict[str, Any]:
def metadata(self) -> dict[str, Any]:
"""
The metadata for the column. See `DataFrame.metadata` for more details.
"""
@@ -330,7 +329,7 @@ def num_chunks(self) -> int:
pass

@abstractmethod
def get_chunks(self, n_chunks: Optional[int] = None) -> Iterable["Column"]:
def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:
"""
Return an iterator yielding the chunks.

@@ -395,7 +394,7 @@ def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):

@property
@abstractmethod
def metadata(self) -> Dict[str, Any]:
def metadata(self) -> dict[str, Any]:
"""
The metadata for the data frame, as a dictionary with string keys. The
contents of `metadata` may be anything, they are meant for a library
@@ -415,7 +414,7 @@ def num_columns(self) -> int:
pass

@abstractmethod
def num_rows(self) -> Optional[int]:
def num_rows(self) -> int | None:
# TODO: not happy with Optional, but need to flag it may be expensive
# why include it if it may be None - what do we expect consumers
# to do here?
@@ -460,21 +459,21 @@ def get_columns(self) -> Iterable[Column]:
pass

@abstractmethod
def select_columns(self, indices: Sequence[int]) -> "DataFrame":
def select_columns(self, indices: Sequence[int]) -> DataFrame:
"""
Create a new DataFrame by selecting a subset of columns by index.
"""
pass

@abstractmethod
def select_columns_by_name(self, names: Sequence[str]) -> "DataFrame":
def select_columns_by_name(self, names: Sequence[str]) -> DataFrame:
"""
Create a new DataFrame by selecting a subset of columns by name.
"""
pass

@abstractmethod
def get_chunks(self, n_chunks: Optional[int] = None) -> Iterable["DataFrame"]:
def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]:
"""
Return an iterator yielding the chunks.

33 changes: 14 additions & 19 deletions pandas/core/exchange/from_dataframe.py
@@ -1,13 +1,8 @@
from __future__ import annotations

import ctypes
import re
from typing import (
Any,
Dict,
List,
Optional,
Tuple,
Union,
)
from typing import Any

import numpy as np

@@ -24,7 +19,7 @@
Endianness,
)

_NP_DTYPES: Dict[DtypeKind, Dict[int, Any]] = {
_NP_DTYPES: dict[DtypeKind, dict[int, Any]] = {
DtypeKind.INT: {8: np.int8, 16: np.int16, 32: np.int32, 64: np.int64},
DtypeKind.UINT: {8: np.uint8, 16: np.uint16, 32: np.uint32, 64: np.uint64},
DtypeKind.FLOAT: {32: np.float32, 64: np.float64},
@@ -108,7 +103,7 @@ def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame:
"""
# We need a dict of columns here, with each column being a NumPy array (at
# least for now, deal with non-NumPy dtypes later).
columns: Dict[str, Any] = {}
columns: dict[str, Any] = {}
buffers = [] # hold on to buffers, keeps memory alive
for name in df.column_names():
if not isinstance(name, str):
@@ -140,7 +135,7 @@ def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame:
return pandas_df


def primitive_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]:
def primitive_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:
"""
Convert a column holding one of the primitive dtypes to a NumPy array.

@@ -165,7 +160,7 @@ def primitive_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]:
return data, buffers


def categorical_column_to_series(col: Column) -> Tuple[pd.Series, Any]:
def categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]:
"""
Convert a column holding categorical data to a pandas Series.

@@ -205,7 +200,7 @@ def categorical_column_to_series(col: Column) -> Tuple[pd.Series, Any]:
return data, buffers


def string_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]:
def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:
"""
Convert a column holding string data to a NumPy array.

@@ -268,7 +263,7 @@ def string_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]:
null_pos = ~null_pos

# Assemble the strings from the code units
str_list: List[Union[None, float, str]] = [None] * col.size
str_list: list[None | float | str] = [None] * col.size
for i in range(col.size):
# Check for missing values
if null_pos is not None and null_pos[i]:
@@ -324,7 +319,7 @@ def parse_datetime_format_str(format_str, data):
raise NotImplementedError(f"DateTime kind is not supported: {format_str}")


def datetime_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]:
def datetime_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:
"""
Convert a column holding DateTime data to a NumPy array.

@@ -362,9 +357,9 @@

def buffer_to_ndarray(
buffer: Buffer,
dtype: Tuple[DtypeKind, int, str, str],
dtype: tuple[DtypeKind, int, str, str],
offset: int = 0,
length: Optional[int] = None,
length: int | None = None,
) -> np.ndarray:
"""
Build a NumPy array from the passed buffer.
@@ -470,9 +465,9 @@ def bitmask_to_bool_ndarray(


def set_nulls(
data: Union[np.ndarray, pd.Series],
data: np.ndarray | pd.Series,
col: Column,
validity: Optional[Tuple[Buffer, Tuple[DtypeKind, int, str, str]]],
validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None,
allow_modify_inplace: bool = True,
):
"""
2 changes: 2 additions & 0 deletions pandas/core/exchange/utils.py
@@ -2,6 +2,8 @@
Utility functions and objects for implementing the exchange API.
"""

from __future__ import annotations

import re
import typing
