Skip to content

Use black 19.10b0 #29508

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 13 commits
Nov 17, 2019
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/python/black
rev: stable
rev: 19.10b0
hooks:
- id: black
language_version: python3.7
Expand Down
2 changes: 1 addition & 1 deletion environment.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ dependencies:
- cython>=0.29.13

# code checks
- black<=19.3b0
- black>=19.10b0
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

let's make this ==

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sure done here - #29673

- cpplint
- flake8
- flake8-comprehensions # used by flake8, linting of unnecessary comprehensions
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -1153,7 +1153,7 @@ def compute(self, method):
n = min(n, narr)

kth_val = algos.kth_smallest(arr.copy(), n - 1)
ns, = np.nonzero(arr <= kth_val)
(ns,) = np.nonzero(arr <= kth_val)
inds = ns[arr[ns].argsort(kind="mergesort")]

if self.keep != "all":
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/frame.py
Original file line number Diff line number Diff line change
Expand Up @@ -4793,7 +4793,7 @@ def drop_duplicates(self, subset=None, keep="first", inplace=False):
duplicated = self.duplicated(subset, keep=keep)

if inplace:
inds, = (-duplicated)._ndarray_values.nonzero()
(inds,) = (-duplicated)._ndarray_values.nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -3604,7 +3604,7 @@ class animal locomotion

if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
inds, = loc.nonzero()
(inds,) = loc.nonzero()
return self.take(inds, axis=axis)
else:
return self.take(loc, axis=axis)
Expand Down
8 changes: 5 additions & 3 deletions pandas/core/groupby/grouper.py
Original file line number Diff line number Diff line change
Expand Up @@ -287,9 +287,11 @@ def __init__(
if self.name is None:
self.name = index.names[level]

self.grouper, self._codes, self._group_index = index._get_grouper_for_level( # noqa: E501
self.grouper, level
)
(
self.grouper,
self._codes,
self._group_index,
) = index._get_grouper_for_level(self.grouper, level)

# a passed Grouper like, directly get the grouper in the same way
# as single grouper groupby, use the group_info to get codes
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/indexes/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -1869,7 +1869,7 @@ def _isnan(self):
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
w, = self._isnan.nonzero()
(w,) = self._isnan.nonzero()
return w
else:
return np.array([], dtype=np.int64)
Expand Down
6 changes: 3 additions & 3 deletions pandas/core/indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,7 @@ def _setitem_with_indexer(self, indexer, value):
# if there is only one block/type, still have to take split path
# unless the block is one-dimensional or it can hold the value
if not take_split_path and self.obj._data.blocks:
blk, = self.obj._data.blocks
(blk,) = self.obj._data.blocks
if 1 < blk.ndim: # in case of dict, keys are indices
val = list(value.values()) if isinstance(value, dict) else value
take_split_path = not blk._can_hold_element(val)
Expand Down Expand Up @@ -1111,7 +1111,7 @@ def _getitem_iterable(self, key, axis: int):
if com.is_bool_indexer(key):
# A boolean indexer
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
(inds,) = key.nonzero()
return self.obj.take(inds, axis=axis)
else:
# A collection of keys
Expand Down Expand Up @@ -1255,7 +1255,7 @@ def _convert_to_indexer(self, obj, axis: int, raise_missing: bool = False):

if com.is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
(inds,) = obj.nonzero()
return inds
else:
# When setting, missing keys are not allowed, even with .loc:
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/internals/managers.py
Original file line number Diff line number Diff line change
Expand Up @@ -1862,7 +1862,7 @@ def _shape_compat(x):


def _interleaved_dtype(
blocks: List[Block]
blocks: List[Block],
) -> Optional[Union[np.dtype, ExtensionDtype]]:
"""Find the common dtype for `blocks`.

Expand Down
4 changes: 2 additions & 2 deletions pandas/io/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ def _is_url(url) -> bool:


def _expand_user(
filepath_or_buffer: FilePathOrBuffer[AnyStr]
filepath_or_buffer: FilePathOrBuffer[AnyStr],
) -> FilePathOrBuffer[AnyStr]:
"""Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Expand Down Expand Up @@ -139,7 +139,7 @@ def _validate_header_arg(header) -> None:


def _stringify_path(
filepath_or_buffer: FilePathOrBuffer[AnyStr]
filepath_or_buffer: FilePathOrBuffer[AnyStr],
) -> FilePathOrBuffer[AnyStr]:
"""Attempt to convert a path-like object to a string.

Expand Down
14 changes: 12 additions & 2 deletions pandas/io/parsers.py
Original file line number Diff line number Diff line change
Expand Up @@ -1918,7 +1918,12 @@ def __init__(self, src, **kwds):
else:
if len(self._reader.header) > 1:
# we have a multi index in the columns
self.names, self.index_names, self.col_names, passed_names = self._extract_multi_indexer_columns( # noqa: E501
(
self.names,
self.index_names,
self.col_names,
passed_names,
) = self._extract_multi_indexer_columns(
self._reader.header, self.index_names, self.col_names, passed_names
)
else:
Expand Down Expand Up @@ -2307,7 +2312,12 @@ def __init__(self, f, **kwds):
# The original set is stored in self.original_columns.
if len(self.columns) > 1:
# we are processing a multi index column
self.columns, self.index_names, self.col_names, _ = self._extract_multi_indexer_columns( # noqa: E501
(
self.columns,
self.index_names,
self.col_names,
_,
) = self._extract_multi_indexer_columns(
self.columns, self.index_names, self.col_names
)
# Update list of original names to include all indices.
Expand Down
2 changes: 1 addition & 1 deletion pandas/io/stata.py
Original file line number Diff line number Diff line change
Expand Up @@ -614,7 +614,7 @@ def _cast_to_stata_types(data):
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
if data[col].max() >= 2 ** 53 or data[col].min() <= -(2 ** 53):
ws = precision_loss_doc % ("int64", "float64")
elif dtype in (np.float32, np.float64):
value = data[col].max()
Expand Down
12 changes: 8 additions & 4 deletions pandas/tests/arrays/sparse/test_array.py
Original file line number Diff line number Diff line change
Expand Up @@ -658,12 +658,16 @@ def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])

sparse = SparseArray(dense)
res = sparse[4:,] # noqa: E231
res = sparse[
4:,
] # noqa: E231
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this is pretty gnarly. is there an alternative?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Seems related to: psf/black#1139

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there a way to turn off formatting for a single line? (something like res = sparse[4:, ] # fmt: off, but from their README it seems this only works for blocks)

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I was surprised that black doesn't support this - I upvoted -> psf/black#790

I suspect they will revert this in psf/black#1139

exp = SparseArray(dense[4:,]) # noqa: E231
tm.assert_sp_array_equal(res, exp)

sparse = SparseArray(dense, fill_value=0)
res = sparse[4:,] # noqa: E231
res = sparse[
4:,
] # noqa: E231
exp = SparseArray(dense[4:,], fill_value=0) # noqa: E231
tm.assert_sp_array_equal(res, exp)

Expand Down Expand Up @@ -823,11 +827,11 @@ def test_nonzero(self):
# Tests regression #21172.
sa = pd.SparseArray([float("nan"), float("nan"), 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])
expected = np.array([2, 5, 9], dtype=np.int32)
result, = sa.nonzero()
(result,) = sa.nonzero()
tm.assert_numpy_array_equal(expected, result)

sa = pd.SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])
result, = sa.nonzero()
(result,) = sa.nonzero()
tm.assert_numpy_array_equal(expected, result)


Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/dtypes/test_inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -505,7 +505,7 @@ def test_convert_numeric_int64_uint64(self, case, coerce):
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)

@pytest.mark.parametrize("value", [-2 ** 63 - 1, 2 ** 64])
@pytest.mark.parametrize("value", [-(2 ** 63) - 1, 2 ** 64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
Expand Down
6 changes: 3 additions & 3 deletions pandas/tests/frame/test_constructors.py
Original file line number Diff line number Diff line change
Expand Up @@ -245,9 +245,9 @@ def test_constructor_overflow_int64(self):
np.array([2 ** 64], dtype=object),
np.array([2 ** 65]),
[2 ** 64 + 1],
np.array([-2 ** 63 - 4], dtype=object),
np.array([-2 ** 64 - 1]),
[-2 ** 65 - 2],
np.array([-(2 ** 63) - 4], dtype=object),
np.array([-(2 ** 64) - 1]),
[-(2 ** 65) - 2],
],
)
def test_constructor_int_overflow(self, values):
Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/indexes/period/test_construction.py
Original file line number Diff line number Diff line change
Expand Up @@ -434,7 +434,7 @@ def test_constructor_range_based_deprecated_different_freq(self):
with tm.assert_produces_warning(FutureWarning) as m:
PeriodIndex(start="2000", periods=2)

warning, = m
(warning,) = m
assert 'freq="A-DEC"' in str(warning.message)

def test_constructor(self):
Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/indexing/multiindex/test_getitem.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ def test_series_getitem_indexing_errors(


def test_series_getitem_corner_generator(
multiindex_year_month_day_dataframe_random_data
multiindex_year_month_day_dataframe_random_data,
):
s = multiindex_year_month_day_dataframe_random_data["A"]
result = s[(x > 0 for x in s)]
Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/indexing/multiindex/test_xs.py
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,7 @@ def test_xs_level_series_ymd(multiindex_year_month_day_dataframe_random_data):


def test_xs_level_series_slice_not_implemented(
multiindex_year_month_day_dataframe_random_data
multiindex_year_month_day_dataframe_random_data,
):
# this test is not explicitly testing .xs functionality
# TODO: move to another module or refactor
Expand Down
12 changes: 9 additions & 3 deletions pandas/tests/indexing/test_callable.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,14 @@ def test_frame_loc_callable(self):
res = df.loc[lambda x: x.A > 2]
tm.assert_frame_equal(res, df.loc[df.A > 2])

res = df.loc[lambda x: x.A > 2,] # noqa: E231
res = df.loc[
lambda x: x.A > 2,
] # noqa: E231
tm.assert_frame_equal(res, df.loc[df.A > 2,]) # noqa: E231

res = df.loc[lambda x: x.A > 2,] # noqa: E231
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This doesn't read any better either

res = df.loc[
lambda x: x.A > 2,
] # noqa: E231
tm.assert_frame_equal(res, df.loc[df.A > 2,]) # noqa: E231

res = df.loc[lambda x: x.B == "b", :]
Expand Down Expand Up @@ -90,7 +94,9 @@ def test_frame_loc_callable_labels(self):
res = df.loc[lambda x: ["A", "C"]]
tm.assert_frame_equal(res, df.loc[["A", "C"]])

res = df.loc[lambda x: ["A", "C"],] # noqa: E231
res = df.loc[
lambda x: ["A", "C"],
] # noqa: E231
tm.assert_frame_equal(res, df.loc[["A", "C"],]) # noqa: E231

res = df.loc[lambda x: ["A", "C"], :]
Expand Down
4 changes: 2 additions & 2 deletions pandas/tests/io/parser/test_index_col.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,8 @@ def test_index_col_named(all_parsers, with_header):
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
header = (
"ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
) # noqa
"ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n" # noqa
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I suppose this noqa is also not needed? (was needed before for too long line length, but that is no longer the case)

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ahh I missed this - done

)

if with_header:
data = header + no_header
Expand Down
4 changes: 2 additions & 2 deletions pandas/tests/reductions/test_reductions.py
Original file line number Diff line number Diff line change
Expand Up @@ -179,8 +179,8 @@ class TestIndexReductions:
[
(0, 400, 3),
(500, 0, -6),
(-10 ** 6, 10 ** 6, 4),
(10 ** 6, -10 ** 6, -4),
(-(10 ** 6), 10 ** 6, 4),
(10 ** 6, -(10 ** 6), -4),
(0, 10, 20),
],
)
Expand Down
10 changes: 5 additions & 5 deletions pandas/tests/test_algos.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,10 +215,10 @@ def test_uint64_factorize(self, writable):
tm.assert_numpy_array_equal(uniques, exp_uniques)

def test_int64_factorize(self, writable):
data = np.array([2 ** 63 - 1, -2 ** 63, 2 ** 63 - 1], dtype=np.int64)
data = np.array([2 ** 63 - 1, -(2 ** 63), 2 ** 63 - 1], dtype=np.int64)
data.setflags(write=writable)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2 ** 63 - 1, -2 ** 63], dtype=np.int64)
exp_uniques = np.array([2 ** 63 - 1, -(2 ** 63)], dtype=np.int64)

labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
Expand Down Expand Up @@ -257,7 +257,7 @@ def test_deprecate_order(self):
"data",
[
np.array([0, 1, 0], dtype="u8"),
np.array([-2 ** 63, 1, -2 ** 63], dtype="i8"),
np.array([-(2 ** 63), 1, -(2 ** 63)], dtype="i8"),
np.array(["__nan__", "foo", "__nan__"], dtype="object"),
],
)
Expand All @@ -274,8 +274,8 @@ def test_parametrized_factorize_na_value_default(self, data):
[
(np.array([0, 1, 0, 2], dtype="u8"), 0),
(np.array([1, 0, 1, 2], dtype="u8"), 1),
(np.array([-2 ** 63, 1, -2 ** 63, 0], dtype="i8"), -2 ** 63),
(np.array([1, -2 ** 63, 1, 0], dtype="i8"), 1),
(np.array([-(2 ** 63), 1, -(2 ** 63), 0], dtype="i8"), -(2 ** 63)),
(np.array([1, -(2 ** 63), 1, 0], dtype="i8"), 1),
(np.array(["a", "", "a", "b"], dtype=object), "a"),
(np.array([(), ("a", 1), (), ("a", 2)], dtype=object), ()),
(np.array([("a", 1), (), ("a", 1), ("a", 2)], dtype=object), ("a", 1)),
Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/test_nanops.py
Original file line number Diff line number Diff line change
Expand Up @@ -302,7 +302,7 @@ def test_nanmean_overflow(self):
# In the previous implementation mean can overflow for int dtypes, it
# is now consistent with numpy

for a in [2 ** 55, -2 ** 55, 20150515061816532]:
for a in [2 ** 55, -(2 ** 55), 20150515061816532]:
s = Series(a, index=range(500), dtype=np.int64)
result = s.mean()
np_result = s.values.mean()
Expand Down
2 changes: 1 addition & 1 deletion requirements-dev.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ python-dateutil>=2.6.1
pytz
asv
cython>=0.29.13
black<=19.3b0
black>=19.10b0
cpplint
flake8
flake8-comprehensions
Expand Down