
STYLE enable ruff PLR5501 #51709 #51747


Merged: 19 commits, Mar 7, 2023
Changes from 14 commits
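All of the hunks below are the same mechanical rewrite: ruff's PLR5501 (collapsible-else-if) flags an `else:` block whose entire body is a single `if`, and the fix collapses it into `elif`. A minimal sketch of the pattern with illustrative names follows; the config change that actually enables the rule (presumably adding `"PLR5501"` to the ruff `select` list in pyproject.toml) is part of the PR but not among the files shown here.

```python
def describe(int_pow10: int, use_eng_prefix: bool) -> str:
    """Illustrative only -- the shape of code PLR5501 flags."""
    if use_eng_prefix:
        prefix = {3: "k", 6: "M"}.get(int_pow10, "?")
    else:
        # PLR5501: an `else` that only wraps an `if` adds a needless level.
        if int_pow10 < 0:
            prefix = f"E-{-int_pow10:02d}"
        else:
            prefix = f"E+{int_pow10:02d}"
    return prefix


def describe_collapsed(int_pow10: int, use_eng_prefix: bool) -> str:
    """Same behavior, written the way the rule (and every hunk below) prefers."""
    if use_eng_prefix:
        prefix = {3: "k", 6: "M"}.get(int_pow10, "?")
    elif int_pow10 < 0:
        prefix = f"E-{-int_pow10:02d}"
    else:
        prefix = f"E+{int_pow10:02d}"
    return prefix


assert describe(-6, False) == describe_collapsed(-6, False) == "E-06"
```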
8 changes: 4 additions & 4 deletions pandas/io/formats/format.py
@@ -2103,11 +2103,11 @@ def __call__(self, num: float) -> str:

         if self.use_eng_prefix:
             prefix = self.ENG_PREFIXES[int_pow10]

+        elif int_pow10 < 0:
+            prefix = f"E-{-int_pow10:02d}"
         else:
-            if int_pow10 < 0:
-                prefix = f"E-{-int_pow10:02d}"
-            else:
-                prefix = f"E+{int_pow10:02d}"
+            prefix = f"E+{int_pow10:02d}"

         mant = sign * dnum / (10**pow10)

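For context on the hunk above: both spellings of the exponent branch live in `EngFormatter`, which is private API, so the sketch below is illustrative only and the exact output strings may change.

```python
from pandas.io.formats.format import EngFormatter  # private; for illustration only

eng = EngFormatter(accuracy=1, use_eng_prefix=True)    # takes the ENG_PREFIXES branch
expo = EngFormatter(accuracy=1, use_eng_prefix=False)  # takes the elif/else chain above

for value in (1_500_000, 0.000_002):
    print(eng(value), "|", expo(value))
```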
13 changes: 6 additions & 7 deletions pandas/io/formats/info.py
@@ -660,14 +660,13 @@ def _create_table_builder(self) -> DataFrameTableBuilder:
             )
         elif self.verbose is False:  # specifically set to False, not necessarily None
             return DataFrameTableBuilderNonVerbose(info=self.info)
+        elif self.exceeds_info_cols:
+            return DataFrameTableBuilderNonVerbose(info=self.info)
         else:
-            if self.exceeds_info_cols:
-                return DataFrameTableBuilderNonVerbose(info=self.info)
-            else:
-                return DataFrameTableBuilderVerbose(
-                    info=self.info,
-                    with_counts=self.show_counts,
-                )
+            return DataFrameTableBuilderVerbose(
+                info=self.info,
+                with_counts=self.show_counts,
+            )


class SeriesInfoPrinter(InfoPrinterAbstract):
8 changes: 4 additions & 4 deletions pandas/io/formats/printing.py
@@ -272,10 +272,10 @@ class TableSchemaFormatter(BaseFormatter):
             formatters[mimetype] = TableSchemaFormatter()
         # enable it if it's been disabled:
         formatters[mimetype].enabled = True
-    else:
-        # unregister tableschema mime-type
-        if mimetype in formatters:
-            formatters[mimetype].enabled = False
+
+    # unregister tableschema mime-type
+    elif mimetype in formatters:
+        formatters[mimetype].enabled = False


def default_pprint(thing: Any, max_seq_items: int | None = None) -> str:
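The function touched above is, as far as I can tell, the callback behind the `display.html.table_schema` option; a minimal sketch of the toggle that reaches the new `elif` (the formatter registry only exists when running under IPython):

```python
import pandas as pd

# True registers/enables the "application/vnd.dataresource+json" formatter;
# False goes through the `elif mimetype in formatters:` branch and disables it.
pd.set_option("display.html.table_schema", True)
pd.set_option("display.html.table_schema", False)
```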
5 changes: 2 additions & 3 deletions pandas/io/parquet.py
@@ -131,9 +131,8 @@ def validate_dataframe(df: DataFrame) -> None:
                      each level of the MultiIndex
                     """
                 )
-        else:
-            if df.columns.inferred_type not in {"string", "empty"}:
-                raise ValueError("parquet must have string column names")
+        elif df.columns.inferred_type not in {"string", "empty"}:
+            raise ValueError("parquet must have string column names")

         # index level names must be strings
         valid_names = all(
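The flattened branch above is the non-MultiIndex column-name check in `validate_dataframe`; a small illustration of what it enforces (assumes a parquet engine such as pyarrow is installed, and the output path is hypothetical):

```python
import pandas as pd

df = pd.DataFrame({0: [1, 2], 1: [3, 4]})  # integer column labels
print(df.columns.inferred_type)            # "integer" -> fails the check

try:
    df.to_parquet("example.parquet")       # hypothetical output path
except ValueError as err:
    print(err)                             # parquet must have string column names

df.columns = df.columns.map(str)           # stringifying the labels satisfies it
df.to_parquet("example.parquet")
```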
7 changes: 3 additions & 4 deletions pandas/io/parsers/arrow_parser_wrapper.py
@@ -112,10 +112,9 @@ def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame:
             for i, item in enumerate(self.index_col):
                 if is_integer(item):
                     self.index_col[i] = frame.columns[item]
-                else:
-                    # String case
-                    if item not in frame.columns:
-                        raise ValueError(f"Index {item} invalid")
+                # String case
+                elif item not in frame.columns:
+                    raise ValueError(f"Index {item} invalid")
             frame.set_index(self.index_col, drop=True, inplace=True)
             # Clear names if headerless and no name given
             if self.header is None and not multi_index_named:
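`_finalize_pandas_output` resolves `index_col` for the pyarrow engine, accepting positions (the integer branch) or column names (the new `elif`); a rough sketch, assuming pyarrow is installed:

```python
import io

import pandas as pd

data = b"a,b,c\n1,2,3\n4,5,6\n"

# Positional: the integer branch maps 0 -> frame.columns[0].
df_pos = pd.read_csv(io.BytesIO(data), engine="pyarrow", index_col=0)

# Named: the string branch checks the name against frame.columns and raises
# "Index ... invalid" for an unknown column.
df_name = pd.read_csv(io.BytesIO(data), engine="pyarrow", index_col="a")

print(df_pos.index.equals(df_name.index))
```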
35 changes: 16 additions & 19 deletions pandas/io/parsers/c_parser_wrapper.py
@@ -394,26 +394,23 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
         dtype = dtypes.pop()
         if is_categorical_dtype(dtype):
             result[name] = union_categoricals(arrs, sort_categories=False)
+        elif isinstance(dtype, ExtensionDtype):
+            # TODO: concat_compat?
+            array_type = dtype.construct_array_type()
+            # error: Argument 1 to "_concat_same_type" of "ExtensionArray"
+            # has incompatible type "List[Union[ExtensionArray, ndarray]]";
+            # expected "Sequence[ExtensionArray]"
+            result[name] = array_type._concat_same_type(arrs)  # type: ignore[arg-type]
         else:
-            if isinstance(dtype, ExtensionDtype):
-                # TODO: concat_compat?
-                array_type = dtype.construct_array_type()
-                # error: Argument 1 to "_concat_same_type" of "ExtensionArray"
-                # has incompatible type "List[Union[ExtensionArray, ndarray]]";
-                # expected "Sequence[ExtensionArray]"
-                result[name] = array_type._concat_same_type(
-                    arrs  # type: ignore[arg-type]
-                )
-            else:
-                # error: Argument 1 to "concatenate" has incompatible
-                # type "List[Union[ExtensionArray, ndarray[Any, Any]]]"
-                # ; expected "Union[_SupportsArray[dtype[Any]],
-                # Sequence[_SupportsArray[dtype[Any]]],
-                # Sequence[Sequence[_SupportsArray[dtype[Any]]]],
-                # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]
-                # , Sequence[Sequence[Sequence[Sequence[
-                # _SupportsArray[dtype[Any]]]]]]]"
-                result[name] = np.concatenate(arrs)  # type: ignore[arg-type]
+            # error: Argument 1 to "concatenate" has incompatible
+            # type "List[Union[ExtensionArray, ndarray[Any, Any]]]"
+            # ; expected "Union[_SupportsArray[dtype[Any]],
+            # Sequence[_SupportsArray[dtype[Any]]],
+            # Sequence[Sequence[_SupportsArray[dtype[Any]]]],
+            # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]
+            # , Sequence[Sequence[Sequence[Sequence[
+            # _SupportsArray[dtype[Any]]]]]]]"
+            result[name] = np.concatenate(arrs)  # type: ignore[arg-type]

     if warning_columns:
         warning_names = ",".join(warning_columns)
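`_concatenate_chunks` stitches per-chunk arrays back together when the C parser reads in pieces (e.g. with `low_memory=True`); the categorical branch above leans on `union_categoricals`, which is public and easy to demo:

```python
import pandas as pd
from pandas.api.types import union_categoricals

# Different chunks of one column can see different category sets; unioning
# them keeps a single categorical dtype instead of falling back to object.
chunk1 = pd.Categorical(["a", "b"])
chunk2 = pd.Categorical(["b", "c"])
combined = union_categoricals([chunk1, chunk2], sort_categories=False)
print(combined.categories.tolist())  # ['a', 'b', 'c']
```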
25 changes: 12 additions & 13 deletions pandas/io/parsers/python_parser.py
@@ -555,20 +555,19 @@ def _infer_columns(
                 columns = self._handle_usecols(
                     columns, columns[0], num_original_columns
                 )
+            elif self.usecols is None or len(names) >= num_original_columns:
+                columns = self._handle_usecols([names], names, num_original_columns)
+                num_original_columns = len(names)
+            elif not callable(self.usecols) and len(names) != len(self.usecols):
+                raise ValueError(
+                    "Number of passed names did not match number of "
+                    "header fields in the file"
+                )
             else:
-                if self.usecols is None or len(names) >= num_original_columns:
-                    columns = self._handle_usecols([names], names, num_original_columns)
-                    num_original_columns = len(names)
-                else:
-                    if not callable(self.usecols) and len(names) != len(self.usecols):
-                        raise ValueError(
-                            "Number of passed names did not match number of "
-                            "header fields in the file"
-                        )
-                    # Ignore output but set used columns.
-                    self._handle_usecols([names], names, ncols)
-                    columns = [names]
-                    num_original_columns = ncols
+                # Ignore output but set used columns.
+                self._handle_usecols([names], names, ncols)
+                columns = [names]
+                num_original_columns = ncols

         return columns, num_original_columns, unnamed_cols

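The reshuffled chain above handles a headerless file when both `names` and `usecols` are passed to the python engine; roughly, a `usecols` selection whose length disagrees with `names` is what trips the ValueError. A hedged sketch of the happy path:

```python
import io

import pandas as pd

data = "1,2,3\n4,5,6\n"  # three columns, no header row

# len(names) == len(usecols): falls through to the final `else:` branch,
# which records the used columns and keeps the supplied names.
df = pd.read_csv(
    io.StringIO(data),
    engine="python",
    header=None,
    names=["x", "y"],
    usecols=[0, 1],
)
print(df)

# Per the new `elif`, names=["x", "y"] with usecols=[0, 1, 2] should instead
# raise "Number of passed names did not match number of header fields ...".
```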
6 changes: 3 additions & 3 deletions pandas/io/pytables.py
@@ -2196,9 +2196,9 @@ def update_info(self, info) -> None:
f"existing_value [{existing_value}] conflicts with "
f"new value [{value}]"
)
else:
if value is not None or existing_value is not None:
idx[key] = value

elif value is not None or existing_value is not None:
idx[key] = value

def set_info(self, info) -> None:
"""set my state from the passed info"""
65 changes: 32 additions & 33 deletions pandas/io/xml.py
@@ -240,40 +240,39 @@ def _parse_nodes(self, elems: list[Any]) -> list[dict[str, str | None]]:
                 for el in elems
             ]

-        else:
-            if self.names:
-                dicts = [
-                    {
-                        **el.attrib,
-                        **(
-                            {el.tag: el.text.strip()}
-                            if el.text and not el.text.isspace()
-                            else {}
-                        ),
-                        **{
-                            nm: ch.text.strip() if ch.text else None
-                            for nm, ch in zip(self.names, el.findall("*"))
-                        },
-                    }
-                    for el in elems
-                ]
+        elif self.names:
+            dicts = [
+                {
+                    **el.attrib,
+                    **(
+                        {el.tag: el.text.strip()}
+                        if el.text and not el.text.isspace()
+                        else {}
+                    ),
+                    **{
+                        nm: ch.text.strip() if ch.text else None
+                        for nm, ch in zip(self.names, el.findall("*"))
+                    },
+                }
+                for el in elems
+            ]

-            else:
-                dicts = [
-                    {
-                        **el.attrib,
-                        **(
-                            {el.tag: el.text.strip()}
-                            if el.text and not el.text.isspace()
-                            else {}
-                        ),
-                        **{
-                            ch.tag: ch.text.strip() if ch.text else None
-                            for ch in el.findall("*")
-                        },
-                    }
-                    for el in elems
-                ]
+        else:
+            dicts = [
+                {
+                    **el.attrib,
+                    **(
+                        {el.tag: el.text.strip()}
+                        if el.text and not el.text.isspace()
+                        else {}
+                    ),
+                    **{
+                        ch.tag: ch.text.strip() if ch.text else None
+                        for ch in el.findall("*")
+                    },
+                }
+                for el in elems
+            ]

         dicts = [
             {k.split("}")[1] if "}" in k else k: v for k, v in d.items()} for d in dicts
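The two dict-building paths above are `read_xml` without and with `names=`; a compact illustration using the stdlib parser so lxml is not required:

```python
import io

import pandas as pd

xml = """<data>
  <row><shape>square</shape><sides>4</sides></row>
  <row><shape>circle</shape><sides/></row>
</data>"""

# Child tag names become the columns (the final `else:` path).
df_default = pd.read_xml(io.StringIO(xml), parser="etree")

# Supplied names are zipped against the children (the `elif self.names:` path).
df_named = pd.read_xml(io.StringIO(xml), parser="etree", names=["kind", "n_sides"])

print(df_default.columns.tolist(), df_named.columns.tolist())
```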
14 changes: 6 additions & 8 deletions pandas/plotting/_matplotlib/boxplot.py
@@ -403,11 +403,10 @@ def plot_group(keys, values, ax: Axes, **kwds):
     colors = _get_colors()
     if column is None:
         columns = None
+    elif isinstance(column, (list, tuple)):
+        columns = column
     else:
-        if isinstance(column, (list, tuple)):
-            columns = column
-        else:
-            columns = [column]
+        columns = [column]

     if by is not None:
         # Prefer array return type for 2-D plots to match the subplot layout
@@ -523,11 +522,10 @@ def boxplot_frame_groupby(
         keys, frames = zip(*grouped)
         if grouped.axis == 0:
             df = pd.concat(frames, keys=keys, axis=1)
+        elif len(frames) > 1:
+            df = frames[0].join(frames[1::])
         else:
-            if len(frames) > 1:
-                df = frames[0].join(frames[1::])
-            else:
-                df = frames[0]
+            df = frames[0]

         # GH 16748, DataFrameGroupby fails when subplots=False and `column` argument
         # is assigned, and in this case, since `df` here becomes MI after groupby,
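`boxplot_frame_groupby` backs `DataFrameGroupBy.boxplot`; the rewritten chain only matters for `subplots=False`, where the per-group frames are joined back into one frame before plotting. A sketch (matplotlib required):

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame(
    {"group": list("aabb"), "x": rng.normal(size=4), "y": rng.normal(size=4)}
)

# Default subplots=True: one set of boxplots per group, no join needed.
per_group = df.groupby("group").boxplot()

# subplots=False: frames for "a" and "b" are joined (the elif/else above)
# and drawn on a single Axes.
single_axes = df.groupby("group").boxplot(subplots=False)
```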
60 changes: 28 additions & 32 deletions pandas/plotting/_matplotlib/core.py
@@ -164,17 +164,16 @@ def __init__(
         if isinstance(data, DataFrame):
             if column:
                 self.columns = com.maybe_make_list(column)
+            elif self.by is None:
+                self.columns = [
+                    col for col in data.columns if is_numeric_dtype(data[col])
+                ]
             else:
-                if self.by is None:
-                    self.columns = [
-                        col for col in data.columns if is_numeric_dtype(data[col])
-                    ]
-                else:
-                    self.columns = [
-                        col
-                        for col in data.columns
-                        if col not in self.by and is_numeric_dtype(data[col])
-                    ]
+                self.columns = [
+                    col
+                    for col in data.columns
+                    if col not in self.by and is_numeric_dtype(data[col])
+                ]

         # For `hist` plot, need to get grouped original data before `self.data` is
         # updated later
@@ -504,15 +503,14 @@ def _setup_subplots(self):
                 layout=self.layout,
                 layout_type=self._layout_type,
             )
+        elif self.ax is None:
+            fig = self.plt.figure(figsize=self.figsize)
+            axes = fig.add_subplot(111)
         else:
-            if self.ax is None:
-                fig = self.plt.figure(figsize=self.figsize)
-                axes = fig.add_subplot(111)
-            else:
-                fig = self.ax.get_figure()
-                if self.figsize is not None:
-                    fig.set_size_inches(self.figsize)
-                axes = self.ax
+            fig = self.ax.get_figure()
+            if self.figsize is not None:
+                fig.set_size_inches(self.figsize)
+            axes = self.ax

         axes = flatten_axes(axes)
@@ -1225,14 +1223,13 @@ def _make_plot(self):

         if self.colormap is not None:
             cmap = mpl.colormaps.get_cmap(self.colormap)
+        # cmap is only used if c_values are integers, otherwise UserWarning
+        elif is_integer_dtype(c_values):
+            # pandas uses colormap, matplotlib uses cmap.
+            cmap = "Greys"
+            cmap = mpl.colormaps[cmap]
         else:
-            # cmap is only used if c_values are integers, otherwise UserWarning
-            if is_integer_dtype(c_values):
-                # pandas uses colormap, matplotlib uses cmap.
-                cmap = "Greys"
-                cmap = mpl.colormaps[cmap]
-            else:
-                cmap = None
+            cmap = None

         if color_by_categorical:
             from matplotlib import colors
@@ -1630,14 +1627,13 @@ def __init__(self, data, **kwargs) -> None:
                 self.lim_offset = self.bar_width / 2
             else:
                 self.lim_offset = 0
+        elif kwargs["align"] == "edge":
+            w = self.bar_width / self.nseries
+            self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
+            self.lim_offset = w * 0.5
         else:
-            if kwargs["align"] == "edge":
-                w = self.bar_width / self.nseries
-                self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
-                self.lim_offset = w * 0.5
-            else:
-                self.tickoffset = self.bar_width * pos
-                self.lim_offset = 0
+            self.tickoffset = self.bar_width * pos
+            self.lim_offset = 0

         self.ax_pos = self.tick_pos - self.tickoffset

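The colormap fallback above applies to `DataFrame.plot.scatter` when `c` names an integer column and no colormap was given; pandas then quietly falls back to "Greys". A small sketch (matplotlib required):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [4, 3, 2, 1], "c": [10, 20, 30, 40]})

# c is integer dtype and colormap is None, so the new `elif` picks "Greys".
ax_default = df.plot.scatter(x="a", y="b", c="c")

# Passing a colormap explicitly takes the first branch instead.
ax_viridis = df.plot.scatter(x="a", y="b", c="c", colormap="viridis")
```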
6 changes: 3 additions & 3 deletions pandas/plotting/_matplotlib/timeseries.py
@@ -216,9 +216,9 @@ def use_dynamic_x(ax: Axes, data: DataFrame | Series) -> bool:

     if freq is None:  # convert irregular if axes has freq info
         freq = ax_freq
-    else:  # do not use tsplot if irregular was plotted first
-        if (ax_freq is None) and (len(ax.get_lines()) > 0):
-            return False
+    # do not use tsplot if irregular was plotted first
+    elif (ax_freq is None) and (len(ax.get_lines()) > 0):
+        return False

     if freq is None:
         return False
7 changes: 3 additions & 4 deletions pandas/tests/frame/indexing/test_where.py
@@ -105,11 +105,10 @@ def _check_align(df, cond, other, check_dtypes=True):

         if is_scalar(other):
             o = other
+        elif isinstance(other, np.ndarray):
+            o = Series(other[:, i], index=result.index).values
         else:
-            if isinstance(other, np.ndarray):
-                o = Series(other[:, i], index=result.index).values
-            else:
-                o = other[k].values
+            o = other[k].values

         new_values = d if c.all() else np.where(c, d, o)
         expected = Series(new_values, index=result.index, name=k)