Commit 57d8d3a

UPGRADE: Autoupdate pre-commit config (#49428)
* gh 94906
* fixup
* fixup
* pyupgrade
* clean up
* update
* updates
* fixup
* fixup use_time in vectorized.pyx
* changes
* up1
* up2
* up3
* remove duplicated line, split acc. to black formatter
* remove added newline
* remove misplaced commas

Co-authored-by: MarcoGorelli <>
Co-authored-by: Marco Edward Gorelli <[email protected]>
1 parent f6204a5 commit 57d8d3a

25 files changed: +274 −133 lines

.pre-commit-config.yaml

+6 −6

@@ -18,16 +18,16 @@ repos:
     pass_filenames: true
     require_serial: false
 - repo: https://github.com/python/black
-  rev: 22.8.0
+  rev: 22.10.0
   hooks:
   - id: black
 - repo: https://github.com/codespell-project/codespell
-  rev: v2.2.1
+  rev: v2.2.2
   hooks:
   - id: codespell
     types_or: [python, rst, markdown]
 - repo: https://github.com/MarcoGorelli/cython-lint
-  rev: v0.1.8
+  rev: v0.2.1
   hooks:
   - id: cython-lint
 - repo: https://github.com/pre-commit/pre-commit-hooks
@@ -60,15 +60,15 @@ repos:
     - flake8-bugbear==22.7.1
     - pandas-dev-flaker==0.5.0
 - repo: https://github.com/pycqa/pylint
-  rev: v2.15.3
+  rev: v2.15.5
   hooks:
   - id: pylint
 - repo: https://github.com/PyCQA/isort
   rev: 5.10.1
   hooks:
   - id: isort
 - repo: https://github.com/asottile/pyupgrade
-  rev: v2.38.2
+  rev: v3.2.0
   hooks:
   - id: pyupgrade
     args: [--py38-plus]
@@ -83,7 +83,7 @@ repos:
     types: [text] # overwrite types: [rst]
     types_or: [python, rst]
 - repo: https://github.com/sphinx-contrib/sphinx-lint
-  rev: v0.6.1
+  rev: v0.6.7
   hooks:
   - id: sphinx-lint
 - repo: https://github.com/asottile/yesqa
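
The rev bumps above are what `pre-commit autoupdate` produces: repo URLs and hook ids are left alone and only the pinned revisions move. A minimal sketch of reproducing the update locally (the wrapper function is illustrative, not part of the pandas tooling):

import subprocess


def refresh_hooks() -> None:
    # Bump every `rev:` in .pre-commit-config.yaml to the latest upstream tag.
    subprocess.run(["pre-commit", "autoupdate"], check=True)
    # Re-run all hooks over the whole repo so the newer black/pyupgrade/cython-lint
    # versions can reformat or flag code; that is where the remaining hunks below come from.
    subprocess.run(["pre-commit", "run", "--all-files"], check=False)


if __name__ == "__main__":
    refresh_hooks()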

pandas/_libs/algos.pyx

+40 −17

@@ -81,26 +81,48 @@ class Infinity:
     """
     Provide a positive Infinity comparison method for ranking.
     """
-    __lt__ = lambda self, other: False
-    __le__ = lambda self, other: isinstance(other, Infinity)
-    __eq__ = lambda self, other: isinstance(other, Infinity)
-    __ne__ = lambda self, other: not isinstance(other, Infinity)
-    __gt__ = lambda self, other: (not isinstance(other, Infinity) and
-                                  not missing.checknull(other))
-    __ge__ = lambda self, other: not missing.checknull(other)
+    def __lt__(self, other):
+        return False
+
+    def __le__(self, other):
+        return isinstance(other, Infinity)
+
+    def __eq__(self, other):
+        return isinstance(other, Infinity)
+
+    def __ne__(self, other):
+        return not isinstance(other, Infinity)
+
+    def __gt__(self, other):
+        return (not isinstance(other, Infinity) and
+                not missing.checknull(other))
+
+    def __ge__(self, other):
+        return not missing.checknull(other)
 
 
 class NegInfinity:
     """
     Provide a negative Infinity comparison method for ranking.
     """
-    __lt__ = lambda self, other: (not isinstance(other, NegInfinity) and
-                                  not missing.checknull(other))
-    __le__ = lambda self, other: not missing.checknull(other)
-    __eq__ = lambda self, other: isinstance(other, NegInfinity)
-    __ne__ = lambda self, other: not isinstance(other, NegInfinity)
-    __gt__ = lambda self, other: False
-    __ge__ = lambda self, other: isinstance(other, NegInfinity)
+    def __lt__(self, other):
+        return (not isinstance(other, NegInfinity) and
+                not missing.checknull(other))
+
+    def __le__(self, other):
+        return not missing.checknull(other)
+
+    def __eq__(self, other):
+        return isinstance(other, NegInfinity)
+
+    def __ne__(self, other):
+        return not isinstance(other, NegInfinity)
+
+    def __gt__(self, other):
+        return False
+
+    def __ge__(self, other):
+        return isinstance(other, NegInfinity)
 
 
 @cython.wraparound(False)
@@ -321,7 +343,7 @@ def kth_smallest(numeric_t[::1] arr, Py_ssize_t k) -> numeric_t:
 @cython.cdivision(True)
 def nancorr(const float64_t[:, :] mat, bint cov=False, minp=None):
     cdef:
-        Py_ssize_t i, j, xi, yi, N, K
+        Py_ssize_t i, xi, yi, N, K
         bint minpv
         float64_t[:, ::1] result
         ndarray[uint8_t, ndim=2] mask
@@ -377,7 +399,7 @@ def nancorr(const float64_t[:, :] mat, bint cov=False, minp=None):
 @cython.wraparound(False)
 def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1) -> ndarray:
     cdef:
-        Py_ssize_t i, j, xi, yi, N, K
+        Py_ssize_t i, xi, yi, N, K
         ndarray[float64_t, ndim=2] result
         ndarray[float64_t, ndim=2] ranked_mat
         ndarray[float64_t, ndim=1] rankedx, rankedy
@@ -746,7 +768,8 @@ def is_monotonic(ndarray[numeric_object_t, ndim=1] arr, bint timelike):
     n = len(arr)
 
     if n == 1:
-        if arr[0] != arr[0] or (numeric_object_t is int64_t and timelike and arr[0] == NPY_NAT):
+        if arr[0] != arr[0] or (numeric_object_t is int64_t and timelike and
+                                arr[0] == NPY_NAT):
             # single value is NaN
             return False, False, True
         else:
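
The algos.pyx hunk replaces lambda-assigned dunder methods with ordinary def methods, the form the upgraded linters prefer (e.g. flake8's E731, "do not assign a lambda expression, use a def"); behaviour is unchanged. An abridged pure-Python sketch of the resulting pattern, with `checknull` standing in for pandas' missing.checknull:

def checknull(value):
    # Stand-in for missing.checknull: treat None and NaN as null.
    return value is None or value != value


class Infinity:
    """Sentinel that compares above every concrete value, but not above null."""

    def __lt__(self, other):
        return False

    def __ge__(self, other):
        return not checknull(other)

    def __gt__(self, other):
        # Greater than anything that is neither another Infinity nor null.
        return not isinstance(other, Infinity) and not checknull(other)


assert Infinity() > 0
assert not (Infinity() > float("nan"))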

pandas/_libs/groupby.pyx

+31 −15

@@ -265,7 +265,7 @@ def group_cumprod(
     This method modifies the `out` parameter, rather than returning an object.
     """
     cdef:
-        Py_ssize_t i, j, N, K, size
+        Py_ssize_t i, j, N, K
         int64float_t val, na_val
         int64float_t[:, ::1] accum
         intp_t lab
@@ -356,7 +356,7 @@ def group_cumsum(
     This method modifies the `out` parameter, rather than returning an object.
     """
     cdef:
-        Py_ssize_t i, j, N, K, size
+        Py_ssize_t i, j, N, K
         int64float_t val, y, t, na_val
         int64float_t[:, ::1] accum, compensation
         uint8_t[:, ::1] accum_mask
@@ -441,7 +441,7 @@ def group_shift_indexer(
     int periods,
 ) -> None:
     cdef:
-        Py_ssize_t N, i, j, ii, lab
+        Py_ssize_t N, i, ii, lab
         int offset = 0, sign
         int64_t idxer, idxer_slot
         int64_t[::1] label_seen = np.zeros(ngroups, dtype=np.int64)
@@ -743,8 +743,11 @@ def group_sum(
                 # is otherwise the same as in _treat_as_na
                 if uses_mask:
                     isna_entry = mask[i, j]
-                elif (sum_t is float32_t or sum_t is float64_t
-                      or sum_t is complex64_t or sum_t is complex64_t):
+                elif (
+                    sum_t is float32_t
+                    or sum_t is float64_t
+                    or sum_t is complex64_t
+                ):
                     # avoid warnings because of equality comparison
                     isna_entry = not val == val
                 elif sum_t is int64_t and is_datetimelike and val == NPY_NAT:
@@ -770,8 +773,11 @@ def group_sum(
                 # set a placeholder value in out[i, j].
                 if uses_mask:
                     result_mask[i, j] = True
-                elif (sum_t is float32_t or sum_t is float64_t
-                      or sum_t is complex64_t or sum_t is complex64_t):
+                elif (
+                    sum_t is float32_t
+                    or sum_t is float64_t
+                    or sum_t is complex64_t
+                ):
                     out[i, j] = NAN
                 elif sum_t is int64_t:
                     out[i, j] = NPY_NAT
@@ -799,7 +805,7 @@ def group_prod(
     """
     cdef:
         Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
-        int64float_t val, count
+        int64float_t val
         int64float_t[:, ::1] prodx
         int64_t[:, ::1] nobs
         Py_ssize_t len_values = len(values), len_labels = len(labels)
@@ -872,7 +878,7 @@ def group_var(
         floating[:, ::1] mean
         int64_t[:, ::1] nobs
         Py_ssize_t len_values = len(values), len_labels = len(labels)
-        bint isna_entry, uses_mask = not mask is None
+        bint isna_entry, uses_mask = mask is not None
 
     assert min_count == -1, "'min_count' only used in sum and prod"
 
@@ -969,7 +975,7 @@ def group_mean(
         mean_t[:, ::1] sumx, compensation
         int64_t[:, ::1] nobs
         Py_ssize_t len_values = len(values), len_labels = len(labels)
-        bint isna_entry, uses_mask = not mask is None
+        bint isna_entry, uses_mask = mask is not None
 
     assert min_count == -1, "'min_count' only used in sum and prod"
 
@@ -1042,10 +1048,10 @@ def group_ohlc(
     Only aggregates on axis=0
     """
     cdef:
-        Py_ssize_t i, j, N, K, lab
+        Py_ssize_t i, N, K, lab
         int64float_t val
         uint8_t[::1] first_element_set
-        bint isna_entry, uses_mask = not mask is None
+        bint isna_entry, uses_mask = mask is not None
 
     assert min_count == -1, "'min_count' only used in sum and prod"
 
@@ -1240,7 +1246,11 @@ cdef inline bint _treat_as_na(numeric_object_t val, bint is_datetimelike) nogil:
     return False
 
 
-cdef numeric_object_t _get_min_or_max(numeric_object_t val, bint compute_max, bint is_datetimelike):
+cdef numeric_object_t _get_min_or_max(
+    numeric_object_t val,
+    bint compute_max,
+    bint is_datetimelike,
+):
     """
     Find either the min or the max supported by numeric_object_t; 'val' is a
     placeholder to effectively make numeric_object_t an argument.
@@ -1366,7 +1376,10 @@ def group_last(
                 # set a placeholder value in out[i, j].
                 if uses_mask:
                     result_mask[i, j] = True
-                elif numeric_object_t is float32_t or numeric_object_t is float64_t:
+                elif (
+                    numeric_object_t is float32_t
+                    or numeric_object_t is float64_t
+                ):
                     out[i, j] = NAN
                 elif numeric_object_t is int64_t:
                     # Per above, this is a placeholder in
@@ -1486,7 +1499,10 @@ def group_nth(
                     # it was initialized with np.empty. Also ensures
                     # we can downcast out if appropriate.
                     out[i, j] = 0
-                elif numeric_object_t is float32_t or numeric_object_t is float64_t:
+                elif (
+                    numeric_object_t is float32_t
+                    or numeric_object_t is float64_t
+                ):
                     out[i, j] = NAN
                 elif numeric_object_t is int64_t:
                     # Per above, this is a placeholder in
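
Two recurring cleanups in the groupby.pyx hunks: unused variables dropped from cdef blocks (the kind of thing the newer cython-lint flags) and `not mask is None` rewritten as `mask is not None`. A small Python illustration of the identity-test rewrite; both spellings evaluate identically, the new one simply reads as a single comparison:

import numpy as np

mask = np.zeros(3, dtype=bool)

uses_mask = mask is not None      # preferred spelling
legacy = not (mask is None)       # what `not mask is None` parses as

assert uses_mask is legacy is True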

pandas/_libs/internals.pyx

+9 −3

@@ -133,7 +133,7 @@ cdef class BlockPlacement:
     @property
     def as_array(self) -> np.ndarray:
         cdef:
-            Py_ssize_t start, stop, end, _
+            Py_ssize_t start, stop, _
 
         if not self._has_array:
             start, stop, step, _ = slice_get_indices_ex(self._as_slice)
@@ -259,7 +259,6 @@ cdef class BlockPlacement:
         """
         cdef:
             slice slc = self._ensure_has_slice()
-            slice new_slice
             ndarray[intp_t, ndim=1] new_placement
 
         if slc is not None and slc.step == 1:
@@ -678,7 +677,14 @@ cdef class BlockManager:
         public list refs
         public object parent
 
-    def __cinit__(self, blocks=None, axes=None, refs=None, parent=None, verify_integrity=True):
+    def __cinit__(
+        self,
+        blocks=None,
+        axes=None,
+        refs=None,
+        parent=None,
+        verify_integrity=True,
+    ):
         # None as defaults for unpickling GH#42345
         if blocks is None:
             # This adds 1-2 microseconds to DataFrame(np.array([]))
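
The __cinit__ change above is purely formatting: once the signature exceeds black's line length it is exploded to one parameter per line with a trailing comma, so a later parameter addition touches a single line. A plain-Python sketch of the same convention (the function name is illustrative, not pandas API):

def build_manager(
    blocks=None,
    axes=None,
    refs=None,
    parent=None,
    verify_integrity=True,
):
    # Same parameter shape as BlockManager.__cinit__, whose None defaults
    # exist for unpickling (GH#42345, per the comment in the diff above).
    return {
        "blocks": blocks,
        "axes": axes,
        "refs": refs,
        "parent": parent,
        "verify_integrity": verify_integrity,
    }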

pandas/_libs/join.pyx

+3 −3

@@ -275,7 +275,7 @@ def left_join_indexer_unique(
     cdef:
         Py_ssize_t i, j, nleft, nright
         ndarray[intp_t] indexer
-        numeric_object_t lval, rval
+        numeric_object_t rval
 
     i = 0
     j = 0
@@ -324,7 +324,7 @@ def left_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
     is non-unique (if both were unique we'd use left_join_indexer_unique).
     """
     cdef:
-        Py_ssize_t i, j, k, nright, nleft, count
+        Py_ssize_t i, j, nright, nleft, count
         numeric_object_t lval, rval
         ndarray[intp_t] lindexer, rindexer
         ndarray[numeric_object_t] result
@@ -434,7 +434,7 @@ def inner_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
     Both left and right are monotonic increasing but not necessarily unique.
     """
     cdef:
-        Py_ssize_t i, j, k, nright, nleft, count
+        Py_ssize_t i, j, nright, nleft, count
         numeric_object_t lval, rval
         ndarray[intp_t] lindexer, rindexer
         ndarray[numeric_object_t] result
