⬆️ UPGRADE: Autoupdate pre-commit config #42151

Merged · 2 commits · Jun 22, 2021
10 changes: 5 additions & 5 deletions .pre-commit-config.yaml
@@ -9,11 +9,11 @@ repos:
   - id: absolufy-imports
     files: ^pandas/
 - repo: https://github.com/python/black
-  rev: 21.5b2
+  rev: 21.6b0
   hooks:
   - id: black
 - repo: https://github.com/codespell-project/codespell
-  rev: v2.0.0
+  rev: v2.1.0
   hooks:
   - id: codespell
     types_or: [python, rst, markdown]
@@ -53,16 +53,16 @@ repos:
     types: [text]
     args: [--append-config=flake8/cython-template.cfg]
 - repo: https://github.com/PyCQA/isort
-  rev: 5.8.0
+  rev: 5.9.0
   hooks:
   - id: isort
 - repo: https://github.com/asottile/pyupgrade
-  rev: v2.18.3
+  rev: v2.19.4
   hooks:
   - id: pyupgrade
     args: [--py37-plus]
 - repo: https://github.com/pre-commit/pygrep-hooks
-  rev: v1.8.0
+  rev: v1.9.0
   hooks:
   - id: rst-backticks
   - id: rst-directive-colons
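
Apart from the comment typo fixed in pandas/core/missing.py, the code changes below all follow a single pattern: a list comprehension that existed only to be star-unpacked into a call is replaced by a generator expression, presumably produced by the updated pyupgrade hook. A minimal sketch of the equivalence, using hypothetical data rather than anything from pandas:

    rows = [(1, "a"), (2, "b"), (3, "c")]

    # Before: build a throwaway list, then unpack it into zip().
    before = list(zip(*[pair for pair in rows]))

    # After: unpack a generator expression instead; no intermediate list is built.
    after = list(zip(*(pair for pair in rows)))

    assert before == after == [(1, 2, 3), ("a", "b", "c")]

Both spellings pass the same positional arguments, so behavior is unchanged; the generator form simply skips materializing the temporary list.
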
2 changes: 1 addition & 1 deletion pandas/core/arrays/sparse/array.py
@@ -1448,7 +1448,7 @@ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
             sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value)
         )

-    result = getattr(ufunc, method)(*[np.asarray(x) for x in inputs], **kwargs)
+    result = getattr(ufunc, method)(*(np.asarray(x) for x in inputs), **kwargs)
     if out:
         if len(out) == 1:
             out = out[0]
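
For context on the changed line: __array_ufunc__ re-dispatches to the requested ufunc entry point via getattr(ufunc, method) after converting each input with np.asarray. A small sketch of that dispatch pattern, with hypothetical inputs that are not taken from the pandas code:

    import numpy as np

    # Hypothetical inputs, standing in for whatever __array_ufunc__ receives.
    inputs = ([1, 2, 3], [10, 20, 30])
    ufunc, method = np.add, "__call__"

    # getattr(ufunc, method) selects the ufunc entry point ("__call__", "reduce", ...),
    # and the generator unpacks one np.asarray-converted argument per input.
    result = getattr(ufunc, method)(*(np.asarray(x) for x in inputs))
    assert result.tolist() == [11, 22, 33]
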
2 changes: 1 addition & 1 deletion pandas/core/arrays/sparse/scipy_sparse.py
@@ -58,7 +58,7 @@ def _get_label_to_i_dict(labels, sort_labels=False):
     return {k: i for i, k in enumerate(labels)}

 def _get_index_subset_to_coord_dict(index, subset, sort_labels=False):
-    ilabels = list(zip(*[index._get_level_values(i) for i in subset]))
+    ilabels = list(zip(*(index._get_level_values(i) for i in subset)))
     labels_to_i = _get_label_to_i_dict(ilabels, sort_labels=sort_labels)
     labels_to_i = Series(labels_to_i)
     if len(subset) > 1:
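
The rewritten line leans on the usual zip(*...) transpose idiom: one sequence of values per index level goes in, one label tuple per row comes out. A hedged sketch with plain lists standing in for index._get_level_values(i):

    # Hypothetical per-level values for a two-level index (subset = [0, 1]).
    level_values = [
        ["a", "a", "b", "b"],  # level 0
        [1, 2, 1, 2],          # level 1
    ]

    # zip(*...) transposes "one sequence per level" into "one tuple per row label".
    ilabels = list(zip(*(values for values in level_values)))
    assert ilabels == [("a", 1), ("a", 2), ("b", 1), ("b", 2)]
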
2 changes: 1 addition & 1 deletion pandas/core/missing.py
@@ -398,7 +398,7 @@ def interpolate_1d(
         # preserve NaNs on the inside
         preserve_nans |= mid_nans

-    # sort preserve_nans and covert to list
+    # sort preserve_nans and convert to list
     preserve_nans = sorted(preserve_nans)

     result = yvalues.copy()
2 changes: 1 addition & 1 deletion pandas/core/reshape/melt.py
@@ -227,7 +227,7 @@ def lreshape(data: DataFrame, groups, dropna: bool = True, label=None) -> DataFr
     else:
         keys, values = zip(*groups)

-    all_cols = list(set.union(*[set(x) for x in values]))
+    all_cols = list(set.union(*(set(x) for x in values)))
     id_cols = list(data.columns.difference(all_cols))

     K = len(values[0])
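
Here the generator is unpacked into set.union called as an unbound method, which unions any number of sets in one call. A small sketch with hypothetical lreshape-style groups:

    # Hypothetical groups: each entry lists the columns belonging to one group.
    values = [["hr1", "hr2"], ["team1", "team2"], ["hr2", "year1"]]

    # set.union(*sets) is the unbound-method spelling of s1 | s2 | s3; the
    # generator supplies one set per group without building an intermediate list.
    all_cols = list(set.union(*(set(x) for x in values)))
    assert sorted(all_cols) == ["hr1", "hr2", "team1", "team2", "year1"]
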
2 changes: 1 addition & 1 deletion pandas/io/formats/format.py
@@ -861,7 +861,7 @@ def space_format(x, y):
     return y

 str_columns = list(
-    zip(*[[space_format(x, y) for y in x] for x in fmt_columns])
+    zip(*([space_format(x, y) for y in x] for x in fmt_columns))
 )
 if self.sparsify and len(str_columns):
     str_columns = sparsify_labels(str_columns)
2 changes: 1 addition & 1 deletion pandas/io/sql.py
@@ -963,7 +963,7 @@ def insert(self, chunksize: int | None = None, method: str | None = None):
         if start_i >= end_i:
             break

-        chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
+        chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list))
         exec_insert(conn, keys, chunk_iter)

 def _query_iterator(
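
The changed line assembles each chunk row-wise: data_list holds one sequence per column, so slicing every column to the same window and zipping the slices yields one value tuple per row. A minimal sketch with hypothetical column data:

    # Hypothetical column-wise data standing in for data_list: one list per column.
    data_list = [[1, 2, 3, 4], ["a", "b", "c", "d"]]
    start_i, end_i = 0, 2  # first chunk of two rows

    # Slicing each column to the chunk window and zipping the slices produces
    # one row tuple per record, ready for an executemany-style insert.
    chunk_iter = zip(*(col[start_i:end_i] for col in data_list))
    assert list(chunk_iter) == [(1, "a"), (2, "b")]
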
2 changes: 1 addition & 1 deletion pandas/tests/frame/methods/test_describe.py
@@ -346,7 +346,7 @@ def test_describe_percentiles_integer_idx(self):
     result = df.describe(percentiles=pct)

     expected = DataFrame(
-        {"x": [1.0, 1.0, np.NaN, 1.0, *[1.0 for _ in pct], 1.0]},
+        {"x": [1.0, 1.0, np.NaN, 1.0, *(1.0 for _ in pct), 1.0]},
         index=[
             "count",
             "mean",
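
The test change is the same rewrite inside a list literal: PEP 448 star-unpacking accepts any iterable, so a generator expression splices in exactly the elements the old list comprehension did. A quick sketch with hypothetical percentiles:

    import numpy as np

    pct = [0.25, 0.5, 0.75]  # hypothetical percentiles, mirroring the test

    # *(...) inside a list literal splices in one 1.0 per percentile,
    # just as *[...] did, without building the temporary list first.
    expected_x = [1.0, 1.0, np.nan, 1.0, *(1.0 for _ in pct), 1.0]
    assert len(expected_x) == 4 + len(pct) + 1
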
2 changes: 1 addition & 1 deletion pandas/tests/frame/test_arithmetic.py
@@ -763,7 +763,7 @@ def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
     if opname in ["__rmod__", "__rfloordiv__"]:
         # Series ops may return mixed int/float dtypes in cases where
         # DataFrame op will return all-float. So we upcast `expected`
-        dtype = np.common_type(*[x.values for x in exvals.values()])
+        dtype = np.common_type(*(x.values for x in exvals.values()))

     expected = DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
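
np.common_type takes its arrays as separate positional arguments, so unpacking a generator here behaves exactly like unpacking the old list comprehension. A hedged sketch with hypothetical expected columns standing in for exvals:

    import numpy as np
    import pandas as pd

    # Hypothetical expected columns, standing in for exvals in the test.
    exvals = {"a": pd.Series([1, 2]), "b": pd.Series([1.5, 2.5])}

    # np.common_type always returns an inexact (floating) scalar type; for an
    # int64 column and a float64 column that common type is float64.
    dtype = np.common_type(*(x.values for x in exvals.values()))
    assert dtype == np.float64
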
6 changes: 3 additions & 3 deletions pandas/tests/indexing/test_categorical.py
@@ -485,17 +485,17 @@ def test_loc_and_at_with_categorical_index(self):
         [1.5, 2.5, 3.5],
         [-1.5, -2.5, -3.5],
         # numpy int/uint
-        *[np.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_INT_DTYPES],
+        *(np.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_INT_DTYPES),
         # numpy floats
-        *[np.array([1.5, 2.5, 3.5], dtype=dtyp) for dtyp in tm.FLOAT_DTYPES],
+        *(np.array([1.5, 2.5, 3.5], dtype=dtyp) for dtyp in tm.FLOAT_DTYPES),
         # numpy object
         np.array([1, "b", 3.5], dtype=object),
         # pandas scalars
         [Interval(1, 4), Interval(4, 6), Interval(6, 9)],
         [Timestamp(2019, 1, 1), Timestamp(2019, 2, 1), Timestamp(2019, 3, 1)],
         [Timedelta(1, "d"), Timedelta(2, "d"), Timedelta(3, "D")],
         # pandas Integer arrays
-        *[pd.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES],
+        *(pd.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES),
         # other pandas arrays
         pd.IntervalIndex.from_breaks([1, 4, 6, 9]).array,
         pd.date_range("2019-01-01", periods=3).array,