Commit d39fc3b

github-actions[bot] authored and MarcoGorelli committed
pre-commit autoupdate (except for black formatter) (pandas-dev#41160)
Co-authored-by: MarcoGorelli <[email protected]>
1 parent c5490cf commit d39fc3b
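Running pre-commit autoupdate bumps each pinned rev: in .pre-commit-config.yaml to the hook's latest release; here the black formatter's pin was deliberately left alone. The accompanying Python edits all follow one pattern: a list comprehension passed as the sole argument to a call (usually str.join) or unpacked into a fixed set of names is replaced by a generator expression, which the consumer simply iterates, so no throwaway list is built at the call site. A minimal illustration of the recurring pattern, using made-up sample data rather than anything from the pandas codebase:

# Illustration of the recurring rewrite in this diff; the data is invented.
parts = ["a", "b", "c"]

# Before: a list comprehension materializes an intermediate list.
joined_old = "-".join([p.upper() for p in parts])

# After: a generator expression is passed directly; join() just iterates it.
joined_new = "-".join(p.upper() for p in parts)

assert joined_old == joined_new == "A-B-C"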

File tree

16 files changed: +33 -35 lines changed

.pre-commit-config.yaml (+4 -4)

@@ -19,14 +19,14 @@ repos:
       types_or: [python, rst, markdown]
       files: ^(pandas|doc)/
 -   repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v3.4.0
+    rev: v4.0.1
     hooks:
     - id: debug-statements
     - id: end-of-file-fixer
       exclude: \.txt$
     - id: trailing-whitespace
 -   repo: https://github.com/cpplint/cpplint
-    rev: f7061b1 # the latest tag does not have the hook
+    rev: 1.5.5
     hooks:
     - id: cpplint
       # We don't lint all C files because we don't want to lint any that are built
@@ -57,7 +57,7 @@ repos:
     hooks:
     - id: isort
 -   repo: https://github.com/asottile/pyupgrade
-    rev: v2.12.0
+    rev: v2.18.3
     hooks:
     - id: pyupgrade
       args: [--py37-plus]
@@ -72,7 +72,7 @@ repos:
       types: [text] # overwrite types: [rst]
       types_or: [python, rst]
 -   repo: https://github.com/asottile/yesqa
-    rev: v1.2.2
+    rev: v1.2.3
     hooks:
     - id: yesqa
       additional_dependencies:

doc/sphinxext/announce.py (+2 -2)

@@ -54,7 +54,7 @@

 def get_authors(revision_range):
     pat = "^.*\\t(.*)$"
-    lst_release, cur_release = [r.strip() for r in revision_range.split("..")]
+    lst_release, cur_release = (r.strip() for r in revision_range.split(".."))

     if "|" in cur_release:
         # e.g. v1.0.1|HEAD
@@ -119,7 +119,7 @@ def get_pull_requests(repo, revision_range):


 def build_components(revision_range, heading="Contributors"):
-    lst_release, cur_release = [r.strip() for r in revision_range.split("..")]
+    lst_release, cur_release = (r.strip() for r in revision_range.split(".."))
     authors = get_authors(revision_range)

     return {
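The announce.py change above, like the similar ones in merge.py, tile.py, and test_util.py further down, leans on the fact that tuple unpacking iterates its right-hand side, so a generator expression unpacks exactly like a list of the same length, and a wrong number of items still raises ValueError. A small sketch with a made-up revision range:

# Sketch only: the revision range is an invented example value.
revision_range = "v1.2.3..v1.2.4"
lst_release, cur_release = (r.strip() for r in revision_range.split(".."))
assert (lst_release, cur_release) == ("v1.2.3", "v1.2.4")

Note that the parentheses are required here: a generator expression may only omit them when it is the sole argument of a call, which is why the join() rewrites elsewhere in the diff can drop the brackets entirely.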

pandas/_config/config.py (+1 -1)

@@ -157,7 +157,7 @@ def _describe_option(pat: str = "", _print_desc: bool = True):
     if len(keys) == 0:
         raise OptionError("No such keys(s)")

-    s = "\n".join([_build_option_description(k) for k in keys])
+    s = "\n".join(_build_option_description(k) for k in keys)

     if _print_desc:
         print(s)

pandas/core/reshape/merge.py (+2 -2)

@@ -1475,7 +1475,7 @@ def get_join_indexers(
         for n in range(len(left_keys))
     )
     zipped = zip(*mapped)
-    llab, rlab, shape = [list(x) for x in zipped]
+    llab, rlab, shape = (list(x) for x in zipped)

     # get flat i8 keys from label lists
     lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
@@ -1985,7 +1985,7 @@ def _get_multiindex_indexer(
         for n in range(index.nlevels)
     )
     zipped = zip(*mapped)
-    rcodes, lcodes, shape = [list(x) for x in zipped]
+    rcodes, lcodes, shape = (list(x) for x in zipped)
     if sort:
         rcodes = list(map(np.take, rcodes, index.codes))
     else:

pandas/core/reshape/tile.py (+1 -1)

@@ -250,7 +250,7 @@ def cut(
             raise ValueError("Cannot cut empty array")

         rng = (nanops.nanmin(x), nanops.nanmax(x))
-        mn, mx = [mi + 0.0 for mi in rng]
+        mn, mx = (mi + 0.0 for mi in rng)

         if np.isinf(mn) or np.isinf(mx):
             # GH 24314

pandas/io/formats/excel.py (+1 -1)

@@ -773,7 +773,7 @@ def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]:
             series = self.df.iloc[:, colidx]
             for i, val in enumerate(series):
                 if styles is not None:
-                    css = ";".join([a + ":" + str(v) for (a, v) in styles[i, colidx]])
+                    css = ";".join(a + ":" + str(v) for (a, v) in styles[i, colidx])
                     xlstyle = self.style_converter(css)
                 yield ExcelCell(self.rowcounter + i, colidx + coloffset, val, xlstyle)

pandas/io/formats/latex.py (+4 -4)

@@ -361,7 +361,7 @@ def get_result(self) -> str:
             self.bottom_separator,
             self.env_end,
         ]
-        result = "\n".join([item for item in elements if item])
+        result = "\n".join(item for item in elements if item)
         trailing_newline = "\n"
         result += trailing_newline
         return result
@@ -530,13 +530,13 @@ def env_begin(self) -> str:
             f"\\begin{{longtable}}{self._position_macro}{{{self.column_format}}}"
         )
         elements = [first_row, f"{self._caption_and_label()}"]
-        return "\n".join([item for item in elements if item])
+        return "\n".join(item for item in elements if item)

     def _caption_and_label(self) -> str:
         if self.caption or self.label:
             double_backslash = "\\\\"
             elements = [f"{self._caption_macro}", f"{self._label_macro}"]
-            caption_and_label = "\n".join([item for item in elements if item])
+            caption_and_label = "\n".join(item for item in elements if item)
             caption_and_label += double_backslash
             return caption_and_label
         else:
@@ -614,7 +614,7 @@ def env_begin(self) -> str:
             f"{self._label_macro}",
             f"\\begin{{tabular}}{{{self.column_format}}}",
         ]
-        return "\n".join([item for item in elements if item])
+        return "\n".join(item for item in elements if item)

     @property
     def bottom_separator(self) -> str:

pandas/io/html.py (+1 -1)

@@ -627,7 +627,7 @@ def _build_xpath_expr(attrs) -> str:
     if "class_" in attrs:
         attrs["class"] = attrs.pop("class_")

-    s = " and ".join([f"@{k}={repr(v)}" for k, v in attrs.items()])
+    s = " and ".join(f"@{k}={repr(v)}" for k, v in attrs.items())
     return f"[{s}]"

pandas/io/pytables.py (+1 -1)

@@ -3513,7 +3513,7 @@ def validate_version(self, where=None):
         """ are we trying to operate on an old version? """
         if where is not None:
             if self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1:
-                ws = incompatibility_doc % ".".join([str(x) for x in self.version])
+                ws = incompatibility_doc % ".".join(str(x) for x in self.version)
                 warnings.warn(ws, IncompatibilityWarning)

     def validate_min_itemsize(self, min_itemsize):

pandas/tests/generic/test_frame.py (+1 -1)

@@ -126,7 +126,7 @@ def finalize(self, other, method=None, **kwargs):
             for name in self._metadata:
                 if method == "concat":
                     value = "+".join(
-                        [getattr(o, name) for o in other.objs if getattr(o, name, None)]
+                        getattr(o, name) for o in other.objs if getattr(o, name, None)
                     )
                     object.__setattr__(self, name, value)
                 else:

pandas/tests/generic/test_series.py (+1 -1)

@@ -130,7 +130,7 @@ def finalize(self, other, method=None, **kwargs):
             for name in self._metadata:
                 if method == "concat" and name == "filename":
                     value = "+".join(
-                        [getattr(o, name) for o in other.objs if getattr(o, name, None)]
+                        getattr(o, name) for o in other.objs if getattr(o, name, None)
                     )
                     object.__setattr__(self, name, value)
                 else:

pandas/tests/io/parser/test_c_parser_only.py (+5 -5)

@@ -434,10 +434,10 @@ def test_internal_null_byte(c_parser_only):
 def test_read_nrows_large(c_parser_only):
     # gh-7626 - Read only nrows of data in for large inputs (>262144b)
     parser = c_parser_only
-    header_narrow = "\t".join(["COL_HEADER_" + str(i) for i in range(10)]) + "\n"
-    data_narrow = "\t".join(["somedatasomedatasomedata1" for _ in range(10)]) + "\n"
-    header_wide = "\t".join(["COL_HEADER_" + str(i) for i in range(15)]) + "\n"
-    data_wide = "\t".join(["somedatasomedatasomedata2" for _ in range(15)]) + "\n"
+    header_narrow = "\t".join("COL_HEADER_" + str(i) for i in range(10)) + "\n"
+    data_narrow = "\t".join("somedatasomedatasomedata1" for _ in range(10)) + "\n"
+    header_wide = "\t".join("COL_HEADER_" + str(i) for i in range(15)) + "\n"
+    data_wide = "\t".join("somedatasomedatasomedata2" for _ in range(15)) + "\n"
     test_input = header_narrow + data_narrow * 1050 + header_wide + data_wide * 2

     df = parser.read_csv(StringIO(test_input), sep="\t", nrows=1010)
@@ -565,7 +565,7 @@ def test_bytes_exceed_2gb(c_parser_only):
     if parser.low_memory:
         pytest.skip("not a high_memory test")

-    csv = StringIO("strings\n" + "\n".join(["x" * (1 << 20) for _ in range(2100)]))
+    csv = StringIO("strings\n" + "\n".join("x" * (1 << 20) for _ in range(2100)))
     df = parser.read_csv(csv)
     assert not df.empty

pandas/tests/io/parser/test_multi_thread.py (+1 -1)

@@ -44,7 +44,7 @@ def test_multi_thread_string_io_read_csv(all_parsers):
     num_files = 100

     bytes_to_df = [
-        "\n".join([f"{i:d},{i:d},{i:d}" for i in range(max_row_range)]).encode()
+        "\n".join(f"{i:d},{i:d},{i:d}" for i in range(max_row_range)).encode()
         for _ in range(num_files)
     ]

pandas/tests/io/parser/test_skiprows.py (+2 -2)

@@ -49,10 +49,10 @@ def test_deep_skip_rows(all_parsers):
     # see gh-4382
     parser = all_parsers
     data = "a,b,c\n" + "\n".join(
-        [",".join([str(i), str(i + 1), str(i + 2)]) for i in range(10)]
+        ",".join([str(i), str(i + 1), str(i + 2)]) for i in range(10)
     )
     condensed_data = "a,b,c\n" + "\n".join(
-        [",".join([str(i), str(i + 1), str(i + 2)]) for i in [0, 1, 2, 3, 4, 6, 8, 9]]
+        ",".join([str(i), str(i + 1), str(i + 2)]) for i in [0, 1, 2, 3, 4, 6, 8, 9]
    )

     result = parser.read_csv(StringIO(data), skiprows=[6, 8])

pandas/tests/reshape/test_util.py (+1 -1)

@@ -22,7 +22,7 @@ def test_datetimeindex(self):
         # regression test for GitHub issue #6439
         # make sure that the ordering on datetimeindex is consistent
         x = date_range("2000-01-01", periods=2)
-        result1, result2 = [Index(y).day for y in cartesian_product([x, x])]
+        result1, result2 = (Index(y).day for y in cartesian_product([x, x]))
         expected1 = Index([1, 1, 2, 2])
         expected2 = Index([1, 2, 1, 2])
         tm.assert_index_equal(result1, expected1)

pandas/util/_decorators.py (+5 -7)

@@ -245,7 +245,7 @@ def _format_argument_list(allow_args: list[str]):
         return f" except for the argument '{allow_args[0]}'"
     else:
         last = allow_args[-1]
-        args = ", ".join(["'" + x + "'" for x in allow_args[:-1]])
+        args = ", ".join("'" + x + "'" for x in allow_args[:-1])
         return f" except for the arguments {args} and '{last}'"
@@ -385,12 +385,10 @@ def decorator(decorated: F) -> F:

         # formatting templates and concatenating docstring
         decorated.__doc__ = "".join(
-            [
-                component.format(**params)
-                if isinstance(component, str)
-                else dedent(component.__doc__ or "")
-                for component in docstring_components
-            ]
+            component.format(**params)
+            if isinstance(component, str)
+            else dedent(component.__doc__ or "")
+            for component in docstring_components
         )

         # error: "F" has no attribute "_docstring_components"
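The _decorators.py hunk is the only one that removes more than a pair of brackets: a multi-line list comprehension loses its surrounding [ ] and becomes a bare generator expression inside "".join(...), which stays valid even with a conditional expression as the element, because the generator is the call's sole argument. A self-contained sketch, where the values of docstring_components and params are invented stand-ins rather than the real pandas values:

from textwrap import dedent


def _example_component():
    """
    Extended description pulled from a callable component.
    """


# Invented stand-ins for pandas' docstring_components and params.
docstring_components = ["Summary line for {name}.\n", _example_component]
params = {"name": "demo"}

doc = "".join(
    component.format(**params)
    if isinstance(component, str)
    else dedent(component.__doc__ or "")
    for component in docstring_components
)
print(doc)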
