
Commit 376f77d

STYLE enable ruff PLR5501 #51709 (#51747)
* Removed PLR5501
* 7 elif changed
* 8 more
* more fixes
* more changes
* fix python_parser.py (try #1)
* try no. 2
* fix python_parser.py (try no. 3)
* v2.1.0 (fix try 1)
* try no. 2
* Removed extra lines
1 parent 27f7365 commit 376f77d

Showing 24 changed files with 180 additions and 211 deletions.
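PLR5501 is Pylint's collapsible-else-if rule as implemented by ruff: it flags an `if` that is the only statement inside an `else` block and asks for it to be collapsed into `elif`. A minimal sketch of the pattern with a hypothetical `describe` function (not pandas code); the rule is typically run with `ruff check --select PLR5501`:

# Before: the nested `if` is the sole statement of the `else` block, so PLR5501 fires.
def describe_nested(x: int) -> str:
    if x > 0:
        return "positive"
    else:
        if x < 0:
            return "negative"
        else:
            return "zero"


# After: `else:` followed by `if` collapses into `elif`, dropping one indentation level.
def describe_flat(x: int) -> str:
    if x > 0:
        return "positive"
    elif x < 0:
        return "negative"
    else:
        return "zero"

Every hunk below is this same mechanical rewrite; where a comment sat above the nested `if`, it moves with the new `elif` so it stays attached to the branch it describes (see printing.py and timeseries.py).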

pandas/io/formats/format.py

3 additions, 4 deletions

@@ -2104,11 +2104,10 @@ def __call__(self, num: float) -> str:

         if self.use_eng_prefix:
             prefix = self.ENG_PREFIXES[int_pow10]
+        elif int_pow10 < 0:
+            prefix = f"E-{-int_pow10:02d}"
         else:
-            if int_pow10 < 0:
-                prefix = f"E-{-int_pow10:02d}"
-            else:
-                prefix = f"E+{int_pow10:02d}"
+            prefix = f"E+{int_pow10:02d}"

         mant = sign * dnum / (10**pow10)
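The hunk above only flattens the `else: if` inside `EngFormatter.__call__`; the formatted output is unchanged. For reference, the E-/E+ branch it touches can be reached through the public `pd.set_eng_float_format` API (a sketch; the commented output values are illustrative):

import pandas as pd

# use_eng_prefix=False goes through the E-/E+ exponent branch shown in the diff.
pd.set_eng_float_format(accuracy=2, use_eng_prefix=False)
print(pd.Series([1.5e-9, 2.5e6]))  # e.g. 1.50E-09 and 2.50E+06

# use_eng_prefix=True uses the ENG_PREFIXES (SI prefix) lookup instead.
pd.set_eng_float_format(accuracy=2, use_eng_prefix=True)
print(pd.Series([1.5e-9, 2.5e6]))  # e.g. 1.50n and 2.50M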

pandas/io/formats/info.py

6 additions, 7 deletions

@@ -660,14 +660,13 @@ def _create_table_builder(self) -> DataFrameTableBuilder:
             )
         elif self.verbose is False:  # specifically set to False, not necessarily None
             return DataFrameTableBuilderNonVerbose(info=self.info)
+        elif self.exceeds_info_cols:
+            return DataFrameTableBuilderNonVerbose(info=self.info)
         else:
-            if self.exceeds_info_cols:
-                return DataFrameTableBuilderNonVerbose(info=self.info)
-            else:
-                return DataFrameTableBuilderVerbose(
-                    info=self.info,
-                    with_counts=self.show_counts,
-                )
+            return DataFrameTableBuilderVerbose(
+                info=self.info,
+                with_counts=self.show_counts,
+            )


 class SeriesInfoPrinter(InfoPrinterAbstract):
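`_create_table_builder` is the dispatch behind `DataFrame.info`, and the refactor does not change which builder is chosen. A quick sketch of the switch through the public API (output elided; `display.max_info_columns` drives `exceeds_info_cols`):

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

# verbose=False -> DataFrameTableBuilderNonVerbose (summary without per-column rows).
df.info(verbose=False)

# verbose=True -> DataFrameTableBuilderVerbose; show_counts adds non-null counts.
df.info(verbose=True, show_counts=True)

# verbose=None (default) -> the elif/else above picks a builder depending on whether
# the frame has more columns than display.max_info_columns.
df.info()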

pandas/io/formats/printing.py

3 additions, 4 deletions

@@ -272,10 +272,9 @@ class TableSchemaFormatter(BaseFormatter):
             formatters[mimetype] = TableSchemaFormatter()
         # enable it if it's been disabled:
         formatters[mimetype].enabled = True
-    else:
-        # unregister tableschema mime-type
-        if mimetype in formatters:
-            formatters[mimetype].enabled = False
+    # unregister tableschema mime-type
+    elif mimetype in formatters:
+        formatters[mimetype].enabled = False


 def default_pprint(thing: Any, max_seq_items: int | None = None) -> str:

pandas/io/parquet.py

2 additions, 3 deletions

@@ -134,9 +134,8 @@ def validate_dataframe(df: DataFrame) -> None:
                     each level of the MultiIndex
                     """
                 )
-        else:
-            if df.columns.inferred_type not in {"string", "empty"}:
-                raise ValueError("parquet must have string column names")
+        elif df.columns.inferred_type not in {"string", "empty"}:
+            raise ValueError("parquet must have string column names")

         # index level names must be strings
         valid_names = all(

pandas/io/parsers/arrow_parser_wrapper.py

3 additions, 4 deletions

@@ -116,10 +116,9 @@ def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame:
             for i, item in enumerate(self.index_col):
                 if is_integer(item):
                     self.index_col[i] = frame.columns[item]
-                else:
-                    # String case
-                    if item not in frame.columns:
-                        raise ValueError(f"Index {item} invalid")
+                # String case
+                elif item not in frame.columns:
+                    raise ValueError(f"Index {item} invalid")
             frame.set_index(self.index_col, drop=True, inplace=True)
             # Clear names if headerless and no name given
             if self.header is None and not multi_index_named:

pandas/io/parsers/c_parser_wrapper.py

16 additions, 19 deletions

@@ -395,26 +395,23 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
         dtype = dtypes.pop()
         if is_categorical_dtype(dtype):
             result[name] = union_categoricals(arrs, sort_categories=False)
+        elif isinstance(dtype, ExtensionDtype):
+            # TODO: concat_compat?
+            array_type = dtype.construct_array_type()
+            # error: Argument 1 to "_concat_same_type" of "ExtensionArray"
+            # has incompatible type "List[Union[ExtensionArray, ndarray]]";
+            # expected "Sequence[ExtensionArray]"
+            result[name] = array_type._concat_same_type(arrs)  # type: ignore[arg-type]
         else:
-            if isinstance(dtype, ExtensionDtype):
-                # TODO: concat_compat?
-                array_type = dtype.construct_array_type()
-                # error: Argument 1 to "_concat_same_type" of "ExtensionArray"
-                # has incompatible type "List[Union[ExtensionArray, ndarray]]";
-                # expected "Sequence[ExtensionArray]"
-                result[name] = array_type._concat_same_type(
-                    arrs  # type: ignore[arg-type]
-                )
-            else:
-                # error: Argument 1 to "concatenate" has incompatible
-                # type "List[Union[ExtensionArray, ndarray[Any, Any]]]"
-                # ; expected "Union[_SupportsArray[dtype[Any]],
-                # Sequence[_SupportsArray[dtype[Any]]],
-                # Sequence[Sequence[_SupportsArray[dtype[Any]]]],
-                # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]
-                # , Sequence[Sequence[Sequence[Sequence[
-                # _SupportsArray[dtype[Any]]]]]]]"
-                result[name] = np.concatenate(arrs)  # type: ignore[arg-type]
+            # error: Argument 1 to "concatenate" has incompatible
+            # type "List[Union[ExtensionArray, ndarray[Any, Any]]]"
+            # ; expected "Union[_SupportsArray[dtype[Any]],
+            # Sequence[_SupportsArray[dtype[Any]]],
+            # Sequence[Sequence[_SupportsArray[dtype[Any]]]],
+            # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]
+            # , Sequence[Sequence[Sequence[Sequence[
+            # _SupportsArray[dtype[Any]]]]]]]"
+            result[name] = np.concatenate(arrs)  # type: ignore[arg-type]

     if warning_columns:
         warning_names = ",".join(warning_columns)
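`_concatenate_chunks` merges the per-column arrays produced when the C engine reads a file in internal chunks, and the refactor keeps the same three-way dispatch: categorical dtype -> `union_categoricals`, extension dtype -> `_concat_same_type`, anything else -> `np.concatenate`. A rough standalone sketch of that dispatch using public-level calls (the `concat_column_chunks` helper and its sample input are hypothetical, not the parser's internal types):

import numpy as np
import pandas as pd
from pandas.api.types import union_categoricals


def concat_column_chunks(arrs):
    """Concatenate per-chunk arrays for one column, mirroring the elif chain above."""
    dtype = arrs[0].dtype
    if isinstance(dtype, pd.CategoricalDtype):
        # Categoricals need their categories unioned, not just their codes stacked.
        return union_categoricals(arrs, sort_categories=False)
    elif isinstance(dtype, pd.api.extensions.ExtensionDtype):
        # Extension arrays (e.g. nullable Int64) know how to concatenate themselves.
        return dtype.construct_array_type()._concat_same_type(arrs)
    else:
        # Plain NumPy dtypes fall back to np.concatenate.
        return np.concatenate(arrs)


chunks = [pd.array([1, 2], dtype="Int64"), pd.array([3], dtype="Int64")]
print(concat_column_chunks(chunks))  # nullable Int64 array containing [1, 2, 3]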

pandas/io/parsers/python_parser.py

12 additions, 13 deletions

@@ -556,20 +556,19 @@ def _infer_columns(
                 columns = self._handle_usecols(
                     columns, columns[0], num_original_columns
                 )
+            elif self.usecols is None or len(names) >= num_original_columns:
+                columns = self._handle_usecols([names], names, num_original_columns)
+                num_original_columns = len(names)
+            elif not callable(self.usecols) and len(names) != len(self.usecols):
+                raise ValueError(
+                    "Number of passed names did not match number of "
+                    "header fields in the file"
+                )
             else:
-                if self.usecols is None or len(names) >= num_original_columns:
-                    columns = self._handle_usecols([names], names, num_original_columns)
-                    num_original_columns = len(names)
-                else:
-                    if not callable(self.usecols) and len(names) != len(self.usecols):
-                        raise ValueError(
-                            "Number of passed names did not match number of "
-                            "header fields in the file"
-                        )
-                    # Ignore output but set used columns.
-                    self._handle_usecols([names], names, ncols)
-                    columns = [names]
-                    num_original_columns = ncols
+                # Ignore output but set used columns.
+                self._handle_usecols([names], names, ncols)
+                columns = [names]
+                num_original_columns = ncols

         return columns, num_original_columns, unnamed_cols

pandas/io/pytables.py

2 additions, 3 deletions

@@ -2197,9 +2197,8 @@ def update_info(self, info) -> None:
                         f"existing_value [{existing_value}] conflicts with "
                         f"new value [{value}]"
                     )
-            else:
-                if value is not None or existing_value is not None:
-                    idx[key] = value
+            elif value is not None or existing_value is not None:
+                idx[key] = value

     def set_info(self, info) -> None:
         """set my state from the passed info"""

pandas/io/xml.py

32 additions, 33 deletions

@@ -241,40 +241,39 @@ def _parse_nodes(self, elems: list[Any]) -> list[dict[str, str | None]]:
                 for el in elems
             ]

-        else:
-            if self.names:
-                dicts = [
-                    {
-                        **el.attrib,
-                        **(
-                            {el.tag: el.text.strip()}
-                            if el.text and not el.text.isspace()
-                            else {}
-                        ),
-                        **{
-                            nm: ch.text.strip() if ch.text else None
-                            for nm, ch in zip(self.names, el.findall("*"))
-                        },
-                    }
-                    for el in elems
-                ]
+        elif self.names:
+            dicts = [
+                {
+                    **el.attrib,
+                    **(
+                        {el.tag: el.text.strip()}
+                        if el.text and not el.text.isspace()
+                        else {}
+                    ),
+                    **{
+                        nm: ch.text.strip() if ch.text else None
+                        for nm, ch in zip(self.names, el.findall("*"))
+                    },
+                }
+                for el in elems
+            ]

-            else:
-                dicts = [
-                    {
-                        **el.attrib,
-                        **(
-                            {el.tag: el.text.strip()}
-                            if el.text and not el.text.isspace()
-                            else {}
-                        ),
-                        **{
-                            ch.tag: ch.text.strip() if ch.text else None
-                            for ch in el.findall("*")
-                        },
-                    }
-                    for el in elems
-                ]
+        else:
+            dicts = [
+                {
+                    **el.attrib,
+                    **(
+                        {el.tag: el.text.strip()}
+                        if el.text and not el.text.isspace()
+                        else {}
+                    ),
+                    **{
+                        ch.tag: ch.text.strip() if ch.text else None
+                        for ch in el.findall("*")
+                    },
+                }
+                for el in elems
+            ]

         dicts = [
             {k.split("}")[1] if "}" in k else k: v for k, v in d.items()} for d in dicts

pandas/plotting/_matplotlib/boxplot.py

6 additions, 8 deletions

@@ -403,11 +403,10 @@ def plot_group(keys, values, ax: Axes, **kwds):
     colors = _get_colors()
     if column is None:
         columns = None
+    elif isinstance(column, (list, tuple)):
+        columns = column
     else:
-        if isinstance(column, (list, tuple)):
-            columns = column
-        else:
-            columns = [column]
+        columns = [column]

     if by is not None:
         # Prefer array return type for 2-D plots to match the subplot layout

@@ -523,11 +522,10 @@ def boxplot_frame_groupby(
         keys, frames = zip(*grouped)
         if grouped.axis == 0:
             df = pd.concat(frames, keys=keys, axis=1)
+        elif len(frames) > 1:
+            df = frames[0].join(frames[1::])
         else:
-            if len(frames) > 1:
-                df = frames[0].join(frames[1::])
-            else:
-                df = frames[0]
+            df = frames[0]

         # GH 16748, DataFrameGroupby fails when subplots=False and `column` argument
         # is assigned, and in this case, since `df` here becomes MI after groupby,

pandas/plotting/_matplotlib/core.py

28 additions, 32 deletions

@@ -164,17 +164,16 @@ def __init__(
         if isinstance(data, DataFrame):
             if column:
                 self.columns = com.maybe_make_list(column)
+            elif self.by is None:
+                self.columns = [
+                    col for col in data.columns if is_numeric_dtype(data[col])
+                ]
             else:
-                if self.by is None:
-                    self.columns = [
-                        col for col in data.columns if is_numeric_dtype(data[col])
-                    ]
-                else:
-                    self.columns = [
-                        col
-                        for col in data.columns
-                        if col not in self.by and is_numeric_dtype(data[col])
-                    ]
+                self.columns = [
+                    col
+                    for col in data.columns
+                    if col not in self.by and is_numeric_dtype(data[col])
+                ]

         # For `hist` plot, need to get grouped original data before `self.data` is
         # updated later

@@ -504,15 +503,14 @@ def _setup_subplots(self):
                 layout=self.layout,
                 layout_type=self._layout_type,
             )
+        elif self.ax is None:
+            fig = self.plt.figure(figsize=self.figsize)
+            axes = fig.add_subplot(111)
         else:
-            if self.ax is None:
-                fig = self.plt.figure(figsize=self.figsize)
-                axes = fig.add_subplot(111)
-            else:
-                fig = self.ax.get_figure()
-                if self.figsize is not None:
-                    fig.set_size_inches(self.figsize)
-                axes = self.ax
+            fig = self.ax.get_figure()
+            if self.figsize is not None:
+                fig.set_size_inches(self.figsize)
+            axes = self.ax

         axes = flatten_axes(axes)

@@ -1225,14 +1223,13 @@ def _make_plot(self):

         if self.colormap is not None:
             cmap = mpl.colormaps.get_cmap(self.colormap)
+        # cmap is only used if c_values are integers, otherwise UserWarning
+        elif is_integer_dtype(c_values):
+            # pandas uses colormap, matplotlib uses cmap.
+            cmap = "Greys"
+            cmap = mpl.colormaps[cmap]
         else:
-            # cmap is only used if c_values are integers, otherwise UserWarning
-            if is_integer_dtype(c_values):
-                # pandas uses colormap, matplotlib uses cmap.
-                cmap = "Greys"
-                cmap = mpl.colormaps[cmap]
-            else:
-                cmap = None
+            cmap = None

         if color_by_categorical:
             from matplotlib import colors

@@ -1630,14 +1627,13 @@ def __init__(self, data, **kwargs) -> None:
                 self.lim_offset = self.bar_width / 2
             else:
                 self.lim_offset = 0
+        elif kwargs["align"] == "edge":
+            w = self.bar_width / self.nseries
+            self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
+            self.lim_offset = w * 0.5
         else:
-            if kwargs["align"] == "edge":
-                w = self.bar_width / self.nseries
-                self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
-                self.lim_offset = w * 0.5
-            else:
-                self.tickoffset = self.bar_width * pos
-                self.lim_offset = 0
+            self.tickoffset = self.bar_width * pos
+            self.lim_offset = 0

         self.ax_pos = self.tick_pos - self.tickoffset
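The first hunk above decides which DataFrame columns `MPLPlot` keeps when no explicit `column` argument is given; the selection logic is unchanged. A small illustration through the public plotting API (requires matplotlib; the column names are made up for the example):

import pandas as pd

df = pd.DataFrame({"x": [1, 2, 3], "y": [4.0, 5.0, 6.0], "label": ["a", "b", "c"]})

# With no `column` and no `by`, only the numeric columns "x" and "y" end up plotted;
# the object column "label" is excluded, matching the numeric-only selection above.
ax = df.plot(kind="line")
print([line.get_label() for line in ax.get_lines()])  # expect ['x', 'y']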

pandas/plotting/_matplotlib/timeseries.py

3 additions, 3 deletions

@@ -216,9 +216,9 @@ def use_dynamic_x(ax: Axes, data: DataFrame | Series) -> bool:

    if freq is None:  # convert irregular if axes has freq info
        freq = ax_freq
-    else:  # do not use tsplot if irregular was plotted first
-        if (ax_freq is None) and (len(ax.get_lines()) > 0):
-            return False
+    # do not use tsplot if irregular was plotted first
+    elif (ax_freq is None) and (len(ax.get_lines()) > 0):
+        return False

    if freq is None:
        return False

pandas/tests/frame/indexing/test_where.py

3 additions, 4 deletions

@@ -105,11 +105,10 @@ def _check_align(df, cond, other, check_dtypes=True):

         if is_scalar(other):
             o = other
+        elif isinstance(other, np.ndarray):
+            o = Series(other[:, i], index=result.index).values
         else:
-            if isinstance(other, np.ndarray):
-                o = Series(other[:, i], index=result.index).values
-            else:
-                o = other[k].values
+            o = other[k].values

         new_values = d if c.all() else np.where(c, d, o)
         expected = Series(new_values, index=result.index, name=k)
