STYLE fix: pylint "consider-using-in" warning #49213

Merged · 9 commits · Oct 21, 2022
Changes from 4 commits
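
Every hunk below is the same mechanical rewrite suggested by pylint's consider-using-in check (R1714): a chain of == comparisons against a single variable, joined with "or", is collapsed into one membership test against a tuple. A minimal sketch of the pattern, using illustrative names modelled on the first hunk rather than code taken verbatim from pandas:

# Before: pylint emits R1714 ("consider-using-in") for the repeated comparison target.
def is_datetime_kind(kind: str) -> bool:
    return kind == "datetime64" or kind == "datetime"

# After: the same check written as a single membership test on a tuple of allowed values.
def is_datetime_kind_rewritten(kind: str) -> bool:
    return kind in ("datetime64", "datetime")

assert is_datetime_kind("datetime") == is_datetime_kind_rewritten("datetime")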
4 changes: 2 additions & 2 deletions pandas/core/computation/pytables.py
@@ -211,15 +211,15 @@ def stringify(value):

kind = ensure_decoded(self.kind)
meta = ensure_decoded(self.meta)
if kind == "datetime64" or kind == "datetime":
if kind in ('datetime64', 'datetime'):
if isinstance(v, (int, float)):
v = stringify(v)
v = ensure_decoded(v)
v = Timestamp(v)
if v.tz is not None:
v = v.tz_convert("UTC")
return TermValue(v, v.value, kind)
elif kind == "timedelta64" or kind == "timedelta":
elif kind in ('timedelta64', 'timedelta'):
if isinstance(v, str):
v = Timedelta(v).value
else:
2 changes: 1 addition & 1 deletion pandas/core/frame.py
@@ -1754,7 +1754,7 @@ def from_dict(
# error: Incompatible types in assignment (expression has type
# "List[Any]", variable has type "Dict[Any, Any]")
data = list(data.values()) # type: ignore[assignment]
elif orient == "columns" or orient == "tight":
elif orient in ("columns", "tight"):
if columns is not None:
raise ValueError(f"cannot use columns parameter with orient='{orient}'")
else: # pragma: no cover
2 changes: 1 addition & 1 deletion pandas/core/indexes/base.py
@@ -4066,7 +4066,7 @@ def _check_indexing_method(
"method='nearest' not implemented yet "
"for MultiIndex; see GitHub issue 9365"
)
elif method == "pad" or method == "backfill":
elif method in ('pad', 'backfill'):
if tolerance is not None:
raise NotImplementedError(
"tolerance not implemented yet for MultiIndex"
2 changes: 1 addition & 1 deletion pandas/core/indexes/datetimelike.py
@@ -663,7 +663,7 @@ def _get_insert_freq(self, loc: int, item):
if self.size:
if item is NaT:
pass
elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
elif loc in (0, -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
6 changes: 3 additions & 3 deletions pandas/core/indexes/range.py
@@ -833,11 +833,11 @@ def delete(self, loc) -> Index: # type: ignore[override]
# In some cases we can retain RangeIndex, see also
# DatetimeTimedeltaMixin._get_delete_Freq
if is_integer(loc):
if loc == 0 or loc == -len(self):
if loc in (0, -len(self)):
return self[1:]
if loc == -1 or loc == len(self) - 1:
if loc in (-1, len(self) - 1):
return self[:-1]
if len(self) == 3 and (loc == 1 or loc == -2):
if len(self) == 3 and loc in (1, -2):
return self[::2]

elif lib.is_list_like(loc):
2 changes: 1 addition & 1 deletion pandas/core/nanops.py
@@ -183,7 +183,7 @@ def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool:

def _has_infs(result) -> bool:
if isinstance(result, np.ndarray):
if result.dtype == "f8" or result.dtype == "f4":
if result.dtype in ("f8", "f4"):
# Note: outside of an nanops-specific test, we always have
# result.ndim == 1, so there is no risk of this ravel making a copy.
return lib.has_infs(result.ravel("K"))
2 changes: 1 addition & 1 deletion pandas/core/ops/__init__.py
@@ -397,7 +397,7 @@ def _maybe_align_series_as_frame(frame: DataFrame, series: Series, axis: AxisInt
rvalues = series._values
if not isinstance(rvalues, np.ndarray):
# TODO(EA2D): no need to special-case with 2D EAs
if rvalues.dtype == "datetime64[ns]" or rvalues.dtype == "timedelta64[ns]":
if rvalues.dtype in ("datetime64[ns]", "timedelta64[ns]"):
# We can losslessly+cheaply cast to ndarray
rvalues = np.asarray(rvalues)
else:
2 changes: 1 addition & 1 deletion pandas/io/formats/excel.py
@@ -277,7 +277,7 @@ def _border_style(self, style: str | None, width: str | None, color: str | None)
# Return "none" will keep "border" in style dictionary
return "none"

if style == "none" or style == "hidden":
if style in ('none', 'hidden'):
return "none"

width_name = self._get_width_name(width)
2 changes: 1 addition & 1 deletion pandas/io/formats/style.py
@@ -4210,7 +4210,7 @@ def css_calc(x, left: float, right: float, align: str, color: str | list | tuple
z, align = align(values), "zero"
elif isinstance(align, (float, int)):
z, align = float(align), "zero"
elif not (align == "left" or align == "right" or align == "zero"):
elif align not in ("left", "right", "zero"):
raise ValueError(
"`align` should be in {'left', 'right', 'mid', 'mean', 'zero'} or be a "
"value defining the center line or a callable that returns a float"
4 changes: 2 additions & 2 deletions pandas/io/formats/style_render.py
@@ -2228,14 +2228,14 @@ def _parse_latex_css_conversion(styles: CSSList) -> CSSList:
"""

def font_weight(value, arg):
if value == "bold" or value == "bolder":
if value in ('bold', 'bolder'):
return "bfseries", f"{arg}"
return None

def font_style(value, arg):
if value == "italic":
return "itshape", f"{arg}"
elif value == "oblique":
if value == "oblique":
return "slshape", f"{arg}"
return None

2 changes: 1 addition & 1 deletion pandas/io/html.py
@@ -527,7 +527,7 @@ def _expand_colspan_rowspan(

# Append the text from this <td>, colspan times
text = _remove_whitespace(self._text_getter(td))
if self.extract_links == "all" or self.extract_links == section:
if self.extract_links in ('all', section):
href = self._href_getter(td)
text = (text, href)
rowspan = int(self._attr_getter(td, "rowspan") or 1)
2 changes: 1 addition & 1 deletion pandas/io/json/_json.py
@@ -1129,7 +1129,7 @@ def _try_convert_data(
pass

# don't coerce 0-len data
if len(data) and (data.dtype == "float" or data.dtype == "object"):
if len(data) and data.dtype in ("float", "object"):

# coerce ints if we can
try:
9 changes: 5 additions & 4 deletions pandas/io/parsers/python_parser.py
@@ -789,8 +789,8 @@ def _next_iter_line(self, row_num: int) -> list[Scalar] | None:
return line
except csv.Error as e:
if (
self.on_bad_lines == self.BadLineHandleMethod.ERROR
or self.on_bad_lines == self.BadLineHandleMethod.WARN
self.on_bad_lines in
(self.BadLineHandleMethod.ERROR, self.BadLineHandleMethod.WARN)
):
msg = str(e)

@@ -1014,8 +1014,9 @@ def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]:
if new_l is not None:
content.append(new_l)
elif (
self.on_bad_lines == self.BadLineHandleMethod.ERROR
or self.on_bad_lines == self.BadLineHandleMethod.WARN
self.on_bad_lines in
(self.BadLineHandleMethod.ERROR,
self.BadLineHandleMethod.WARN)
):
row_num = self.pos - (content_len - i + footers)
bad_lines.append((row_num, actual_len))
2 changes: 1 addition & 1 deletion pandas/io/parsers/readers.py
@@ -2229,7 +2229,7 @@ def _merge_with_dialect_properties(

# Don't warn if the default parameter was passed in,
# even if it conflicts with the dialect (gh-23761).
if provided != parser_default and provided != dialect_val:
if provided not in (parser_default, dialect_val):
msg = (
f"Conflicting values for '{param}': '{provided}' was "
f"provided, but the dialect specifies '{dialect_val}'. "
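
The style.py and readers.py hunks above apply the negated form of the same rule: "a != x and a != y" (or "not (a == x or a == y)") becomes "a not in (x, y)". A small sketch reusing the parameter names from the readers.py hunk, wrapped in an illustrative helper that is not part of the PR:

def conflicts_with_dialect(provided, parser_default, dialect_val) -> bool:
    # Before: two != comparisons joined with "and".
    old = provided != parser_default and provided != dialect_val
    # After: a single "not in" membership test.
    new = provided not in (parser_default, dialect_val)
    assert old == new  # the two spellings agree for ordinary values
    return new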
6 changes: 3 additions & 3 deletions pandas/io/pytables.py
@@ -618,7 +618,7 @@ def __contains__(self, key: str) -> bool:
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if name == key or name[1:] == key:
if key in (name, name[1:]):
return True
return False

@@ -3006,7 +3006,7 @@ def read_index_node(
attrs = node._v_attrs
factory, kwargs = self._get_index_factory(attrs)

if kind == "date" or kind == "object":
if kind in ('date', 'object'):
index = factory(
_unconvert_index(
data, kind, encoding=self.encoding, errors=self.errors
@@ -5243,7 +5243,7 @@ def __init__(
# see if we have a passed coordinate like
with suppress(ValueError):
inferred = lib.infer_dtype(where, skipna=False)
if inferred == "integer" or inferred == "boolean":
if inferred in ('integer', 'boolean'):
where = np.asarray(where)
if where.dtype == np.bool_:
start, stop = self.start, self.stop
5 changes: 2 additions & 3 deletions pandas/io/sas/sas7bdat.py
@@ -432,10 +432,9 @@ def _process_page_metadata(self) -> None:

if subheader_processor is None:
f1 = (
subheader_compression == const.compressed_subheader_id
or subheader_compression == 0
subheader_compression in (const.compressed_subheader_id, 0)
)
f2 = subheader_type == const.compressed_subheader_type
f2 = (subheader_type == const.compressed_subheader_type)
if self.compression and f1 and f2:
self._current_page_data_subheader_pointers.append(
(subheader_offset, subheader_length)
2 changes: 1 addition & 1 deletion pandas/io/sql.py
@@ -1178,7 +1178,7 @@ def _sqlalchemy_type(self, col):
Time,
)

if col_type == "datetime64" or col_type == "datetime":
if col_type in ('datetime64', 'datetime'):
# GH 9086: TIMESTAMP is the suggested type if the column contains
# timezone information
try:
2 changes: 1 addition & 1 deletion pandas/io/stata.py
@@ -2178,7 +2178,7 @@ def _dtype_to_default_stata_fmt(
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
elif dtype in (np.int8, np.int16):
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError(f"Data type {dtype} not supported.")
2 changes: 1 addition & 1 deletion pandas/tests/apply/test_frame_apply.py
@@ -1342,7 +1342,7 @@ def test_size_as_str(how, axis):
# Just a string attribute arg same as calling df.arg
# on the columns
result = getattr(df, how)("size", axis=axis)
if axis == 0 or axis == "index":
if axis in (0, 'index'):
expected = Series(df.shape[0], index=df.columns)
else:
expected = Series(df.shape[1], index=df.index)
2 changes: 1 addition & 1 deletion pandas/tests/apply/test_frame_transform.py
@@ -69,7 +69,7 @@ def test_transform_empty_listlike(float_frame, ops, frame_or_series):
@pytest.mark.parametrize("box", [dict, Series])
def test_transform_dictlike(axis, float_frame, box):
# GH 35964
if axis == 0 or axis == "index":
if axis in (0, "index"):
e = float_frame.columns[0]
expected = float_frame[[e]].transform(np.abs)
else:
6 changes: 3 additions & 3 deletions pandas/tests/apply/test_str.py
@@ -236,7 +236,7 @@ def test_agg_cython_table_transform_frame(df, func, expected, axis):
# GH 21224
# test transforming functions in
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
if axis == "columns" or axis == 1:
if axis in ("columns", 1):
# operating blockwise doesn't let us preserve dtypes
expected = expected.astype("float64")

@@ -273,7 +273,7 @@ def test_transform_groupby_kernel_frame(request, axis, float_frame, op):
# GH 35964

args = [0.0] if op == "fillna" else []
if axis == 0 or axis == "index":
if axis in (0, "index"):
ones = np.ones(float_frame.shape[0])
else:
ones = np.ones(float_frame.shape[1])
@@ -286,7 +286,7 @@ def test_transform_groupby_kernel_frame(request, axis, float_frame, op):
float_frame["E"] = float_frame["A"].copy()
assert len(float_frame._mgr.arrays) > 1

if axis == 0 or axis == "index":
if axis in (0, "index"):
ones = np.ones(float_frame.shape[0])
else:
ones = np.ones(float_frame.shape[1])
2 changes: 1 addition & 1 deletion pandas/tests/frame/methods/test_interpolate.py
@@ -383,7 +383,7 @@ def test_interp_string_axis(self, axis_name, axis_number):
@pytest.mark.parametrize("method", ["ffill", "bfill", "pad"])
def test_interp_fillna_methods(self, request, axis, method, using_array_manager):
# GH 12918
if using_array_manager and (axis == 1 or axis == "columns"):
if using_array_manager and axis in (1, 'columns'):
# TODO(ArrayManager) support axis=1
td.mark_array_manager_not_yet_implemented(request)

2 changes: 1 addition & 1 deletion pandas/tests/indexes/test_setops.py
@@ -103,7 +103,7 @@ def test_union_different_types(index_flat, index_flat2, request):
# complex objects non-sortable
warn = RuntimeWarning

any_uint64 = idx1.dtype == np.uint64 or idx2.dtype == np.uint64
any_uint64 = np.uint64 in (idx1.dtype, idx2.dtype)
idx1_signed = is_signed_integer_dtype(idx1.dtype)
idx2_signed = is_signed_integer_dtype(idx2.dtype)

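
The test_setops.py hunk above also swaps the operand order: "idx1.dtype == np.uint64 or idx2.dtype == np.uint64" becomes "np.uint64 in (idx1.dtype, idx2.dtype)". Membership in a tuple is still evaluated with == against each element (plus an identity short-circuit), so the two forms are expected to agree here; a quick standalone check, not taken from the pandas test suite:

import numpy as np

dt1, dt2 = np.dtype("int64"), np.dtype("uint64")

old = dt1 == np.uint64 or dt2 == np.uint64  # original chained form
new = np.uint64 in (dt1, dt2)               # rewritten membership form

assert bool(old) == bool(new)  # both spellings detect the uint64 dtype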
16 changes: 8 additions & 8 deletions pandas/tests/indexing/multiindex/test_slice.py
@@ -35,7 +35,7 @@ def test_per_axis_per_level_getitem(self):
d,
)
for a, b, c, d in df.index.values
if (a == "A1" or a == "A2" or a == "A3") and (c == "C1" or c == "C3")
if a in ('A1', 'A2', 'A3') and c in ('C1', 'C3')
]
]
tm.assert_frame_equal(result, expected)
@@ -49,8 +49,8 @@ def test_per_axis_per_level_getitem(self):
d,
)
for a, b, c, d in df.index.values
if (a == "A1" or a == "A2" or a == "A3")
and (c == "C1" or c == "C2" or c == "C3")
if a in ('A1', 'A2', 'A3')
and c in ('C1', 'C2', 'C3')
]
]
result = df.loc[(slice("A1", "A3"), slice(None), slice("C1", "C3")), :]
@@ -121,7 +121,7 @@ def test_per_axis_per_level_getitem(self):
d,
)
for a, b, c, d in s.index.values
if (a == "A1" or a == "A2" or a == "A3") and (c == "C1" or c == "C3")
if a in ('A1', 'A2', 'A3') and c in ('C1', 'C3')
]
]
tm.assert_series_equal(result, expected)
@@ -416,7 +416,7 @@ def test_per_axis_per_level_doc_examples(self):
d,
)
for a, b, c, d in df.index.values
if (a == "A1" or a == "A2" or a == "A3") and (c == "C1" or c == "C3")
if a in ('A1', 'A2', 'A3') and c in ('C1', 'C3')
]
]
tm.assert_frame_equal(result, expected)
@@ -433,7 +433,7 @@ def test_per_axis_per_level_doc_examples(self):
d,
)
for a, b, c, d in df.index.values
if (c == "C1" or c == "C3")
if c in ('C1', 'C3')
]
]
tm.assert_frame_equal(result, expected)
@@ -494,7 +494,7 @@ def test_loc_axis_arguments(self):
d,
)
for a, b, c, d in df.index.values
if (a == "A1" or a == "A2" or a == "A3") and (c == "C1" or c == "C3")
if a in ('A1', 'A2', 'A3') and c in ('C1', 'C3')
]
]
tm.assert_frame_equal(result, expected)
@@ -509,7 +509,7 @@ def test_loc_axis_arguments(self):
d,
)
for a, b, c, d in df.index.values
if (c == "C1" or c == "C3")
if c in ('C1', 'C3')
]
]
tm.assert_frame_equal(result, expected)
2 changes: 1 addition & 1 deletion pandas/tests/io/excel/test_readers.py
@@ -1222,7 +1222,7 @@ def test_read_excel_nrows_non_integer_parameter(self, read_ext):
("testmultiindex", "both", [0, 1], [0, 1], None),
("testmultiindex", "mi_column_name", [0, 1], 0, None),
("testskiprows", "skiprows_list", None, None, [0, 2]),
("testskiprows", "skiprows_list", None, None, lambda x: x == 0 or x == 2),
("testskiprows", "skiprows_list", None, None, lambda x: x in (0, 2)),
],
)
def test_read_excel_nrows_params(
2 changes: 1 addition & 1 deletion pandas/tests/io/json/test_pandas.py
@@ -24,7 +24,7 @@


def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
if orient in ("records", "values"):
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
2 changes: 1 addition & 1 deletion pandas/tests/util/test_assert_series_equal.py
@@ -115,7 +115,7 @@ def test_less_precise(data1, data2, dtype, decimals):
s1 = Series([data1], dtype=dtype)
s2 = Series([data2], dtype=dtype)

if (decimals == 5 or decimals == 10) or (
if decimals in (5, 10) or (
decimals >= 3 and abs(data1 - data2) >= 0.0005
):
if is_extension_array_dtype(dtype):
2 changes: 1 addition & 1 deletion pandas/tseries/frequencies.py
@@ -642,7 +642,7 @@ def _is_quarterly(rule: str) -> bool:

def _is_monthly(rule: str) -> bool:
rule = rule.upper()
return rule == "M" or rule == "BM"
return rule in ('M', 'BM')


def _is_weekly(rule: str) -> bool:
4 changes: 2 additions & 2 deletions pandas/tseries/holiday.py
@@ -54,9 +54,9 @@ def next_monday_or_tuesday(dt: datetime) -> datetime:
(because Monday is already taken by adjacent holiday on the day before)
"""
dow = dt.weekday()
if dow == 5 or dow == 6:
if dow in (5, 6):
return dt + timedelta(2)
elif dow == 0:
if dow == 0:
return dt + timedelta(1)
return dt
