Skip to content

STY: Bump pyright, pyupgrade and mypy for new PEP-696 syntax #60006

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Oct 10, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ repos:
hooks:
- id: isort
- repo: https://github.com/asottile/pyupgrade
rev: v3.16.0
rev: v3.17.0
hooks:
- id: pyupgrade
args: [--py310-plus]
Expand Down Expand Up @@ -112,7 +112,7 @@ repos:
types: [python]
stages: [manual]
additional_dependencies: &pyright_dependencies
- [email protected].352
- [email protected].383
- id: pyright
# note: assumes python env is setup and activated
name: pyright reportGeneralTypeIssues
Expand Down
6 changes: 3 additions & 3 deletions environment.yml
Original file line number Diff line number Diff line change
Expand Up @@ -76,10 +76,10 @@ dependencies:
- cxx-compiler

# code checks
- flake8=6.1.0 # run in subprocess over docstring examples
- mypy=1.9.0 # pre-commit uses locally installed mypy
- flake8=7.1.0 # run in subprocess over docstring examples
- mypy=1.11.2 # pre-commit uses locally installed mypy
- tokenize-rt # scripts/check_for_inconsistent_pandas_namespace.py
- pre-commit>=3.6.0
- pre-commit>=4.0.1

# documentation
- gitpython # obtain contributors from git for whatsnew
Expand Down
4 changes: 2 additions & 2 deletions pandas/_config/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -411,7 +411,7 @@ def __dir__(self) -> list[str]:


@contextmanager
def option_context(*args) -> Generator[None, None, None]:
def option_context(*args) -> Generator[None]:
"""
Context manager to temporarily set options in a ``with`` statement.
Expand Down Expand Up @@ -718,7 +718,7 @@ def _build_option_description(k: str) -> str:


@contextmanager
def config_prefix(prefix: str) -> Generator[None, None, None]:
def config_prefix(prefix: str) -> Generator[None]:
"""
contextmanager for multiple invocations of API with a common prefix
Expand Down
2 changes: 1 addition & 1 deletion pandas/_config/localization.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
@contextmanager
def set_locale(
new_locale: str | tuple[str, str], lc_var: int = locale.LC_ALL
) -> Generator[str | tuple[str, str], None, None]:
) -> Generator[str | tuple[str, str]]:
"""
Context manager for temporarily setting a locale.
Expand Down
2 changes: 1 addition & 1 deletion pandas/_testing/_warnings.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ def assert_produces_warning(
raise_on_extra_warnings: bool = True,
match: str | tuple[str | None, ...] | None = None,
must_find_all_warnings: bool = True,
) -> Generator[list[warnings.WarningMessage], None, None]:
) -> Generator[list[warnings.WarningMessage]]:
"""
Context manager for running code expected to either raise a specific warning,
multiple specific warnings, or not raise any warnings. Verifies that the code
Expand Down
8 changes: 4 additions & 4 deletions pandas/_testing/contexts.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
@contextmanager
def decompress_file(
path: FilePath | BaseBuffer, compression: CompressionOptions
) -> Generator[IO[bytes], None, None]:
) -> Generator[IO[bytes]]:
"""
Open a compressed file and return a file object.
Expand All @@ -50,7 +50,7 @@ def decompress_file(


@contextmanager
def set_timezone(tz: str) -> Generator[None, None, None]:
def set_timezone(tz: str) -> Generator[None]:
"""
Context manager for temporarily setting a timezone.
Expand Down Expand Up @@ -92,7 +92,7 @@ def setTZ(tz) -> None:


@contextmanager
def ensure_clean(filename=None) -> Generator[Any, None, None]:
def ensure_clean(filename=None) -> Generator[Any]:
"""
Gets a temporary path and agrees to remove on close.
Expand Down Expand Up @@ -124,7 +124,7 @@ def ensure_clean(filename=None) -> Generator[Any, None, None]:


@contextmanager
def with_csv_dialect(name: str, **kwargs) -> Generator[None, None, None]:
def with_csv_dialect(name: str, **kwargs) -> Generator[None]:
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Expand Down
2 changes: 1 addition & 1 deletion pandas/compat/pickle_compat.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ def loads(


@contextlib.contextmanager
def patch_pickle() -> Generator[None, None, None]:
def patch_pickle() -> Generator[None]:
"""
Temporarily patch pickle to use our unpickler.
"""
Expand Down
12 changes: 4 additions & 8 deletions pandas/core/apply.py
Original file line number Diff line number Diff line change
Expand Up @@ -246,12 +246,8 @@ def transform(self) -> DataFrame | Series:
and not obj.empty
):
raise ValueError("Transform function failed")
# error: Argument 1 to "__get__" of "AxisProperty" has incompatible type
# "Union[Series, DataFrame, GroupBy[Any], SeriesGroupBy,
# DataFrameGroupBy, BaseWindow, Resampler]"; expected "Union[DataFrame,
# Series]"
if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
obj.index # type: ignore[arg-type]
obj.index
):
raise ValueError("Function did not transform")

Expand Down Expand Up @@ -803,7 +799,7 @@ def result_columns(self) -> Index:

@property
@abc.abstractmethod
def series_generator(self) -> Generator[Series, None, None]:
def series_generator(self) -> Generator[Series]:
pass

@staticmethod
Expand Down Expand Up @@ -1128,7 +1124,7 @@ class FrameRowApply(FrameApply):
axis: AxisInt = 0

@property
def series_generator(self) -> Generator[Series, None, None]:
def series_generator(self) -> Generator[Series]:
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))

@staticmethod
Expand Down Expand Up @@ -1235,7 +1231,7 @@ def apply_broadcast(self, target: DataFrame) -> DataFrame:
return result.T

@property
def series_generator(self) -> Generator[Series, None, None]:
def series_generator(self) -> Generator[Series]:
values = self.values
values = ensure_wrapped_if_datetimelike(values)
assert len(values) > 0
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/arraylike.py
Original file line number Diff line number Diff line change
Expand Up @@ -403,12 +403,12 @@ def _reconstruct(result):
# for np.<ufunc>(..) calls
# kwargs cannot necessarily be handled block-by-block, so only
# take this path if there are no kwargs
mgr = inputs[0]._mgr
mgr = inputs[0]._mgr # pyright: ignore[reportGeneralTypeIssues]
result = mgr.apply(getattr(ufunc, method))
else:
# otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..))
# Those can have an axis keyword and thus can't be called block-by-block
result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs)
result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs) # pyright: ignore[reportGeneralTypeIssues]
# e.g. np.negative (only one reached), with "where" and "out" in kwargs

result = reconstruct(result)
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/arrays/arrow/array.py
Original file line number Diff line number Diff line change
Expand Up @@ -2426,7 +2426,7 @@ def _str_rindex(self, sub: str, start: int = 0, end: int | None = None) -> Self:
result = self._apply_elementwise(predicate)
return type(self)(pa.chunked_array(result))

def _str_normalize(self, form: str) -> Self:
def _str_normalize(self, form: Literal["NFC", "NFD", "NFKC", "NFKD"]) -> Self:
predicate = lambda val: unicodedata.normalize(form, val)
result = self._apply_elementwise(predicate)
return type(self)(pa.chunked_array(result))
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/arrays/boolean.py
Original file line number Diff line number Diff line change
Expand Up @@ -369,7 +369,7 @@ def _coerce_to_array(
assert dtype == "boolean"
return coerce_to_array(value, copy=copy)

def _logical_method(self, other, op):
def _logical_method(self, other, op): # type: ignore[override]
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I've never seen a type-ignore on the signature of a function, do you know why this is needed?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Here's the mypy error

pandas/core/arrays/boolean.py:372: error: Signature of "_logical_method" incompatible with supertype "BaseMaskedArray"  [override]
pandas/core/arrays/boolean.py:372: note:      Superclass:
pandas/core/arrays/boolean.py:372: note:          def _arith_method(self: BaseMaskedArray, other: Any, op: Any) -> Any
pandas/core/arrays/boolean.py:372: note:      Subclass:
pandas/core/arrays/boolean.py:372: note:          def _logical_method(self, other: Any, op: Any) -> Any
pandas/core/arrays/boolean.py:372: note:      Superclass:
pandas/core/arrays/boolean.py:372: note:          def _arith_method(self: BaseMaskedArray, other: Any, op: Any) -> Any
pandas/core/arrays/boolean.py:372: note:      Subclass:
pandas/core/arrays/boolean.py:372: note:          def _logical_method(self, other: Any, op: Any) -> Any

In BaseMaskedArray we assign _logical_method = _arith_method, but override _logical_method in BooleanArray. It looks like mypy is doing some inference on self even though we don't type it?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think self is typed implicitly, yes. Makes sense that it's an override issue, thanks.

assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
other_is_scalar = lib.is_scalar(other)
mask = None
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/arrays/datetimes.py
Original file line number Diff line number Diff line change
Expand Up @@ -2918,7 +2918,7 @@ def _generate_range(
offset: BaseOffset,
*,
unit: str,
) -> Generator[Timestamp, None, None]:
) -> Generator[Timestamp]:
"""
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
Expand Down
4 changes: 1 addition & 3 deletions pandas/core/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -560,9 +560,7 @@ def convert_to_list_like(


@contextlib.contextmanager
def temp_setattr(
obj, attr: str, value, condition: bool = True
) -> Generator[None, None, None]:
def temp_setattr(obj, attr: str, value, condition: bool = True) -> Generator[None]:
"""
Temporarily set attribute on an object.
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/computation/expr.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ def _preparse(
the ``tokenize`` module and ``tokval`` is a string.
"""
assert callable(f), "f must be callable"
return tokenize.untokenize(f(x) for x in tokenize_string(source))
return tokenize.untokenize(f(x) for x in tokenize_string(source)) # pyright: ignore[reportArgumentType]


def _is_type(t):
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/frame.py
Original file line number Diff line number Diff line change
Expand Up @@ -2306,7 +2306,7 @@ def maybe_reorder(

if any(exclude):
arr_exclude = (x for x in exclude if x in arr_columns)
to_remove = {arr_columns.get_loc(col) for col in arr_exclude}
to_remove = {arr_columns.get_loc(col) for col in arr_exclude} # pyright: ignore[reportUnhashable]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]

columns = columns.drop(exclude)
Expand Down
8 changes: 5 additions & 3 deletions pandas/core/groupby/groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -3719,7 +3719,7 @@ def blk_func(values: ArrayLike) -> ArrayLike:
mask = isna(values)
if values.ndim == 1:
indexer = np.empty(values.shape, dtype=np.intp)
col_func(out=indexer, mask=mask)
col_func(out=indexer, mask=mask) # type: ignore[arg-type]
return algorithms.take_nd(values, indexer)

else:
Expand Down Expand Up @@ -4081,7 +4081,9 @@ def _nth(
def quantile(
self,
q: float | AnyArrayLike = 0.5,
interpolation: str = "linear",
interpolation: Literal[
"linear", "lower", "higher", "nearest", "midpoint"
] = "linear",
numeric_only: bool = False,
):
"""
Expand Down Expand Up @@ -4270,7 +4272,7 @@ def blk_func(values: ArrayLike) -> ArrayLike:
func(
out[0],
values=vals,
mask=mask,
mask=mask, # type: ignore[arg-type]
result_mask=result_mask,
is_datetimelike=is_datetimelike,
)
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/groupby/ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -898,7 +898,7 @@ def _unob_index_and_ids(
return unob_index, unob_ids

@final
def get_group_levels(self) -> Generator[Index, None, None]:
def get_group_levels(self) -> Generator[Index]:
# Note: only called from _insert_inaxis_grouper, which
# is only called for BaseGrouper, never for BinGrouper
result_index = self.result_index
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/indexes/interval.py
Original file line number Diff line number Diff line change
Expand Up @@ -991,7 +991,7 @@ def length(self) -> Index:
# --------------------------------------------------------------------
# Set Operations

def _intersection(self, other, sort):
def _intersection(self, other, sort: bool = False):
"""
intersection specialized to the case with matching dtypes.
"""
Expand All @@ -1006,7 +1006,7 @@ def _intersection(self, other, sort):
# duplicates
taken = self._intersection_non_unique(other)

if sort is None:
if sort:
taken = taken.sort_values()

return taken
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/indexes/multi.py
Original file line number Diff line number Diff line change
Expand Up @@ -2664,7 +2664,7 @@ def _reorder_ilevels(self, order) -> MultiIndex:

def _recode_for_new_levels(
self, new_levels, copy: bool = True
) -> Generator[np.ndarray, None, None]:
) -> Generator[np.ndarray]:
if len(new_levels) > self.nlevels:
raise AssertionError(
f"Length of new_levels ({len(new_levels)}) "
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/internals/blocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -388,7 +388,7 @@ def _split_op_result(self, result: ArrayLike) -> list[Block]:
return [nb]

@final
def _split(self) -> Generator[Block, None, None]:
def _split(self) -> Generator[Block]:
"""
Split a block into a list of single-column blocks.
"""
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/internals/concat.py
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,7 @@ def _concat_homogeneous_fastpath(

def _get_combined_plan(
mgrs: list[BlockManager],
) -> Generator[tuple[BlockPlacement, list[JoinUnit]], None, None]:
) -> Generator[tuple[BlockPlacement, list[JoinUnit]]]:
max_len = mgrs[0].shape[0]

blknos_list = [mgr.blknos for mgr in mgrs]
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/internals/managers.py
Original file line number Diff line number Diff line change
Expand Up @@ -856,7 +856,7 @@ def _slice_take_blocks_ax0(
*,
use_na_proxy: bool = False,
ref_inplace_op: bool = False,
) -> Generator[Block, None, None]:
) -> Generator[Block]:
"""
Slice/take blocks along axis=0.
Expand Down Expand Up @@ -1731,7 +1731,7 @@ def unstack(self, unstacker, fill_value) -> BlockManager:
bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False)
return bm

def to_iter_dict(self) -> Generator[tuple[str, Self], None, None]:
def to_iter_dict(self) -> Generator[tuple[str, Self]]:
"""
Yield a tuple of (str(dtype), BlockManager)
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/methods/to_dict.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@

def create_data_for_split(
df: DataFrame, are_all_object_dtype_cols: bool, object_dtype_indices: list[int]
) -> Generator[list, None, None]:
) -> Generator[list]:
"""
Simple helper method to create data for to ``to_dict(orient="split")``
to create the main output data
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/resample.py
Original file line number Diff line number Diff line change
Expand Up @@ -404,7 +404,7 @@ def transform(self, arg, *args, **kwargs):
arg, *args, **kwargs
)

def _downsample(self, f, **kwargs):
def _downsample(self, how, **kwargs):
raise AbstractMethodError(self)

def _upsample(self, f, limit: int | None = None, fill_value=None):
Expand Down
3 changes: 1 addition & 2 deletions pandas/core/series.py
Original file line number Diff line number Diff line change
Expand Up @@ -813,8 +813,7 @@ def _values(self):
def _references(self) -> BlockValuesRefs:
return self._mgr._block.refs

# error: Decorated property not supported
@Appender(base.IndexOpsMixin.array.__doc__) # type: ignore[misc]
@Appender(base.IndexOpsMixin.array.__doc__) # type: ignore[prop-decorator]
@property
def array(self) -> ExtensionArray:
return self._mgr.array_values()
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/tools/datetimes.py
Original file line number Diff line number Diff line change
Expand Up @@ -1000,7 +1000,7 @@ def to_datetime(
dayfirst=dayfirst,
yearfirst=yearfirst,
errors=errors,
exact=exact,
exact=exact, # type: ignore[arg-type]
)
result: Timestamp | NaTType | Series | Index

Expand Down
2 changes: 1 addition & 1 deletion pandas/core/window/rolling.py
Original file line number Diff line number Diff line change
Expand Up @@ -1507,7 +1507,7 @@ def _generate_cython_apply_func(
window_aggregations.roll_apply,
args=args,
kwargs=kwargs,
raw=raw,
raw=bool(raw),
function=function,
)

Expand Down
2 changes: 1 addition & 1 deletion pandas/io/excel/_odswriter.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ class ODSWriter(ExcelWriter):
_engine = "odf"
_supported_extensions = (".ods",)

def __init__(
def __init__( # pyright: ignore[reportInconsistentConstructor]
self,
path: FilePath | WriteExcelBuffer | ExcelWriter,
engine: str | None = None,
Expand Down
2 changes: 1 addition & 1 deletion pandas/io/excel/_openpyxl.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ class OpenpyxlWriter(ExcelWriter):
_engine = "openpyxl"
_supported_extensions = (".xlsx", ".xlsm")

def __init__(
def __init__( # pyright: ignore[reportInconsistentConstructor]
self,
path: FilePath | WriteExcelBuffer | ExcelWriter,
engine: str | None = None,
Expand Down
2 changes: 1 addition & 1 deletion pandas/io/excel/_xlsxwriter.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,7 @@ class XlsxWriter(ExcelWriter):
_engine = "xlsxwriter"
_supported_extensions = (".xlsx",)

def __init__(
def __init__( # pyright: ignore[reportInconsistentConstructor]
self,
path: FilePath | WriteExcelBuffer | ExcelWriter,
engine: str | None = None,
Expand Down
Loading