Skip to content

Commit db8a204

Browse files
CI: Upgrade 'pyupgrade' (v2.7.4 --> v2.9.0)
Resolves: GH39523. This commit also includes the style/formatting changes required after upgrading pyupgrade — namely, removing unused imports (flake8) and re-running black and isort.
1 parent 9c33e5e commit db8a204

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

103 files changed

+1601
-1996
lines changed

.pre-commit-config.yaml

+1-1
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ repos:
2424
hooks:
2525
- id: isort
2626
- repo: https://github.com/asottile/pyupgrade
27-
rev: v2.7.4
27+
rev: v2.9.0
2828
hooks:
2929
- id: pyupgrade
3030
args: [--py37-plus]

pandas/_testing/__init__.py

+17-25
Original file line numberDiff line numberDiff line change
@@ -7,15 +7,7 @@
77
import os
88
import re
99
import string
10-
from typing import (
11-
TYPE_CHECKING,
12-
Callable,
13-
ContextManager,
14-
Counter,
15-
Iterable,
16-
List,
17-
Type,
18-
)
10+
from typing import TYPE_CHECKING, Callable, ContextManager, Counter, Iterable
1911
import warnings
2012

2113
import numpy as np
@@ -107,24 +99,24 @@
10799
_N = 30
108100
_K = 4
109101

110-
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
111-
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
112-
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
113-
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
102+
UNSIGNED_INT_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
103+
UNSIGNED_EA_INT_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
104+
SIGNED_INT_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
105+
SIGNED_EA_INT_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
114106
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
115107
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
116108

117-
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
118-
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
119-
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
120-
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
109+
FLOAT_DTYPES: list[Dtype] = [float, "float32", "float64"]
110+
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
111+
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
112+
STRING_DTYPES: list[Dtype] = [str, "str", "U"]
121113

122-
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
123-
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
114+
DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
115+
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
124116

125-
BOOL_DTYPES: List[Dtype] = [bool, "bool"]
126-
BYTES_DTYPES: List[Dtype] = [bytes, "bytes"]
127-
OBJECT_DTYPES: List[Dtype] = [object, "object"]
117+
BOOL_DTYPES: list[Dtype] = [bool, "bool"]
118+
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
119+
OBJECT_DTYPES: list[Dtype] = [object, "object"]
128120

129121
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
130122
ALL_NUMPY_DTYPES = (
@@ -417,7 +409,7 @@ def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
417409
----------
418410
k: length of each of the index instances
419411
"""
420-
make_index_funcs: List[Callable[..., Index]] = [
412+
make_index_funcs: list[Callable[..., Index]] = [
421413
makeDateIndex,
422414
makePeriodIndex,
423415
makeTimedeltaIndex,
@@ -865,7 +857,7 @@ def skipna_wrapper(x):
865857
return skipna_wrapper
866858

867859

868-
def convert_rows_list_to_csv_str(rows_list: List[str]):
860+
def convert_rows_list_to_csv_str(rows_list: list[str]):
869861
"""
870862
Convert list of CSV rows to single CSV-formatted string for current OS.
871863
@@ -885,7 +877,7 @@ def convert_rows_list_to_csv_str(rows_list: List[str]):
885877
return sep.join(rows_list) + sep
886878

887879

888-
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
880+
def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
889881
"""
890882
Helper function to mark pytest.raises that have an external error message.
891883

pandas/compat/pickle_compat.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
import copy
88
import io
99
import pickle as pkl
10-
from typing import TYPE_CHECKING, Optional
10+
from typing import TYPE_CHECKING
1111
import warnings
1212

1313
from pandas._libs.tslibs import BaseOffset
@@ -229,7 +229,7 @@ def load_newobj_ex(self):
229229
pass
230230

231231

232-
def load(fh, encoding: Optional[str] = None, is_verbose: bool = False):
232+
def load(fh, encoding: str | None = None, is_verbose: bool = False):
233233
"""
234234
Load a pickle, with a provided encoding,
235235

pandas/core/aggregation.py

+13-17
Original file line numberDiff line numberDiff line change
@@ -12,14 +12,10 @@
1212
Any,
1313
Callable,
1414
DefaultDict,
15-
Dict,
1615
Hashable,
1716
Iterable,
1817
List,
19-
Optional,
2018
Sequence,
21-
Tuple,
22-
Union,
2319
cast,
2420
)
2521

@@ -47,8 +43,8 @@
4743

4844

4945
def reconstruct_func(
50-
func: Optional[AggFuncType], **kwargs
51-
) -> Tuple[bool, Optional[AggFuncType], Optional[List[str]], Optional[List[int]]]:
46+
func: AggFuncType | None, **kwargs
47+
) -> tuple[bool, AggFuncType | None, list[str] | None, list[int] | None]:
5248
"""
5349
This is the internal function to reconstruct func given if there is relabeling
5450
or not and also normalize the keyword to get new order of columns.
@@ -86,8 +82,8 @@ def reconstruct_func(
8682
(False, 'min', None, None)
8783
"""
8884
relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
89-
columns: Optional[List[str]] = None
90-
order: Optional[List[int]] = None
85+
columns: list[str] | None = None
86+
order: list[int] | None = None
9187

9288
if not relabeling:
9389
if isinstance(func, list) and len(func) > len(set(func)):
@@ -134,7 +130,7 @@ def is_multi_agg_with_relabel(**kwargs) -> bool:
134130
)
135131

136132

137-
def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[int]]:
133+
def normalize_keyword_aggregation(kwargs: dict) -> tuple[dict, list[str], list[int]]:
138134
"""
139135
Normalize user-provided "named aggregation" kwargs.
140136
Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
@@ -190,8 +186,8 @@ def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[i
190186

191187

192188
def _make_unique_kwarg_list(
193-
seq: Sequence[Tuple[Any, Any]]
194-
) -> Sequence[Tuple[Any, Any]]:
189+
seq: Sequence[tuple[Any, Any]]
190+
) -> Sequence[tuple[Any, Any]]:
195191
"""
196192
Uniquify aggfunc name of the pairs in the order list
197193
@@ -295,10 +291,10 @@ def maybe_mangle_lambdas(agg_spec: Any) -> Any:
295291

296292
def relabel_result(
297293
result: FrameOrSeries,
298-
func: Dict[str, List[Union[Callable, str]]],
294+
func: dict[str, list[Callable | str]],
299295
columns: Iterable[Hashable],
300296
order: Iterable[int],
301-
) -> Dict[Hashable, Series]:
297+
) -> dict[Hashable, Series]:
302298
"""
303299
Internal function to reorder result if relabelling is True for
304300
dataframe.agg, and return the reordered result in dict.
@@ -325,7 +321,7 @@ def relabel_result(
325321
reordered_indexes = [
326322
pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
327323
]
328-
reordered_result_in_dict: Dict[Hashable, Series] = {}
324+
reordered_result_in_dict: dict[Hashable, Series] = {}
329325
idx = 0
330326

331327
reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
@@ -369,7 +365,7 @@ def relabel_result(
369365

370366
def validate_func_kwargs(
371367
kwargs: dict,
372-
) -> Tuple[List[str], List[Union[str, Callable[..., Any]]]]:
368+
) -> tuple[list[str], list[str | Callable[..., Any]]]:
373369
"""
374370
Validates types of user-provided "named aggregation" kwargs.
375371
`TypeError` is raised if aggfunc is not `str` or callable.
@@ -495,7 +491,7 @@ def transform_dict_like(
495491
# GH 15931 - deprecation of renaming keys
496492
raise SpecificationError("nested renamer is not supported")
497493

498-
results: Dict[Hashable, FrameOrSeriesUnion] = {}
494+
results: dict[Hashable, FrameOrSeriesUnion] = {}
499495
for name, how in func.items():
500496
colg = obj._gotitem(name, ndim=1)
501497
try:
@@ -536,7 +532,7 @@ def transform_str_or_callable(
536532

537533
def agg_list_like(
538534
obj: AggObjType,
539-
arg: List[AggFuncTypeBase],
535+
arg: list[AggFuncTypeBase],
540536
_axis: int,
541537
) -> FrameOrSeriesUnion:
542538
"""

pandas/core/algorithms.py

+9-9
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66

77
import operator
88
from textwrap import dedent
9-
from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union, cast
9+
from typing import TYPE_CHECKING, Union, cast
1010
from warnings import catch_warnings, simplefilter, warn
1111

1212
import numpy as np
@@ -70,15 +70,15 @@
7070
from pandas import Categorical, DataFrame, Index, Series
7171
from pandas.core.arrays import DatetimeArray, TimedeltaArray
7272

73-
_shared_docs: Dict[str, str] = {}
73+
_shared_docs: dict[str, str] = {}
7474

7575

7676
# --------------- #
7777
# dtype access #
7878
# --------------- #
7979
def _ensure_data(
80-
values: ArrayLike, dtype: Optional[DtypeObj] = None
81-
) -> Tuple[np.ndarray, DtypeObj]:
80+
values: ArrayLike, dtype: DtypeObj | None = None
81+
) -> tuple[np.ndarray, DtypeObj]:
8282
"""
8383
routine to ensure that our data is of the correct
8484
input dtype for lower-level routines
@@ -495,7 +495,7 @@ def f(c, v):
495495

496496
def factorize_array(
497497
values: np.ndarray, na_sentinel: int = -1, size_hint=None, na_value=None, mask=None
498-
) -> Tuple[np.ndarray, np.ndarray]:
498+
) -> tuple[np.ndarray, np.ndarray]:
499499
"""
500500
Factorize an array-like to codes and uniques.
501501
@@ -558,9 +558,9 @@ def factorize_array(
558558
def factorize(
559559
values,
560560
sort: bool = False,
561-
na_sentinel: Optional[int] = -1,
562-
size_hint: Optional[int] = None,
563-
) -> Tuple[np.ndarray, Union[np.ndarray, Index]]:
561+
na_sentinel: int | None = -1,
562+
size_hint: int | None = None,
563+
) -> tuple[np.ndarray, np.ndarray | Index]:
564564
"""
565565
Encode the object as an enumerated type or categorical variable.
566566
@@ -2052,7 +2052,7 @@ def safe_sort(
20522052
na_sentinel: int = -1,
20532053
assume_unique: bool = False,
20542054
verify: bool = True,
2055-
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
2055+
) -> np.ndarray | tuple[np.ndarray, np.ndarray]:
20562056
"""
20572057
Sort ``values`` and reorder corresponding ``codes``.
20582058

pandas/core/apply.py

+12-23
Original file line numberDiff line numberDiff line change
@@ -2,18 +2,7 @@
22

33
import abc
44
import inspect
5-
from typing import (
6-
TYPE_CHECKING,
7-
Any,
8-
Dict,
9-
Iterator,
10-
List,
11-
Optional,
12-
Tuple,
13-
Type,
14-
Union,
15-
cast,
16-
)
5+
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, cast
176

187
import numpy as np
198

@@ -58,13 +47,13 @@ def frame_apply(
5847
func: AggFuncType,
5948
axis: Axis = 0,
6049
raw: bool = False,
61-
result_type: Optional[str] = None,
50+
result_type: str | None = None,
6251
args=None,
6352
kwds=None,
6453
) -> FrameApply:
6554
""" construct and return a row or column based frame apply object """
6655
axis = obj._get_axis_number(axis)
67-
klass: Type[FrameApply]
56+
klass: type[FrameApply]
6857
if axis == 0:
6958
klass = FrameRowApply
7059
elif axis == 1:
@@ -104,7 +93,7 @@ def __init__(
10493
obj: AggObjType,
10594
func,
10695
raw: bool,
107-
result_type: Optional[str],
96+
result_type: str | None,
10897
args,
10998
kwds,
11099
):
@@ -144,7 +133,7 @@ def index(self) -> Index:
144133
def apply(self) -> FrameOrSeriesUnion:
145134
pass
146135

147-
def agg(self) -> Tuple[Optional[FrameOrSeriesUnion], Optional[bool]]:
136+
def agg(self) -> tuple[FrameOrSeriesUnion | None, bool | None]:
148137
"""
149138
Provide an implementation for the aggregators.
150139
@@ -188,7 +177,7 @@ def agg(self) -> Tuple[Optional[FrameOrSeriesUnion], Optional[bool]]:
188177
# caller can react
189178
return result, True
190179

191-
def maybe_apply_str(self) -> Optional[FrameOrSeriesUnion]:
180+
def maybe_apply_str(self) -> FrameOrSeriesUnion | None:
192181
"""
193182
Compute apply in case of a string.
194183
@@ -212,7 +201,7 @@ def maybe_apply_str(self) -> Optional[FrameOrSeriesUnion]:
212201
raise ValueError(f"Operation {f} does not support axis=1")
213202
return self.obj._try_aggregate_string_function(f, *self.args, **self.kwds)
214203

215-
def maybe_apply_multiple(self) -> Optional[FrameOrSeriesUnion]:
204+
def maybe_apply_multiple(self) -> FrameOrSeriesUnion | None:
216205
"""
217206
Compute apply in case of a list-like or dict-like.
218207
@@ -411,7 +400,7 @@ def apply_standard(self):
411400
# wrap results
412401
return self.wrap_results(results, res_index)
413402

414-
def apply_series_generator(self) -> Tuple[ResType, Index]:
403+
def apply_series_generator(self) -> tuple[ResType, Index]:
415404
assert callable(self.f)
416405

417406
series_gen = self.series_generator
@@ -664,11 +653,11 @@ def apply_standard(self) -> FrameOrSeriesUnion:
664653

665654

666655
class GroupByApply(Apply):
667-
obj: Union[SeriesGroupBy, DataFrameGroupBy]
656+
obj: SeriesGroupBy | DataFrameGroupBy
668657

669658
def __init__(
670659
self,
671-
obj: Union[SeriesGroupBy, DataFrameGroupBy],
660+
obj: SeriesGroupBy | DataFrameGroupBy,
672661
func: AggFuncType,
673662
args,
674663
kwds,
@@ -690,11 +679,11 @@ def apply(self):
690679

691680
class ResamplerWindowApply(Apply):
692681
axis = 0
693-
obj: Union[Resampler, BaseWindow]
682+
obj: Resampler | BaseWindow
694683

695684
def __init__(
696685
self,
697-
obj: Union[Resampler, BaseWindow],
686+
obj: Resampler | BaseWindow,
698687
func: AggFuncType,
699688
args,
700689
kwds,

0 commit comments

Comments
 (0)