From 6633717a880037611187700c7a27a36f17ae1719 Mon Sep 17 00:00:00 2001
From: carlotta
Date: Fri, 4 Nov 2022 13:04:20 +0100
Subject: [PATCH 1/2] fix pylint: no-else-raise

---
 doc/make.py                                   |  3 +-
 pandas/_testing/_io.py                        |  5 +--
 pandas/compat/_optional.py                    |  3 +-
 pandas/compat/numpy/function.py               |  3 +-
 pandas/conftest.py                            |  3 +-
 pandas/core/apply.py                          |  6 +--
 pandas/core/arrays/datetimelike.py            |  2 +-
 pandas/core/arrays/datetimes.py               |  2 +-
 pandas/core/arrays/interval.py                |  8 ++--
 pandas/core/arrays/masked.py                  |  4 +-
 pandas/core/arrays/period.py                  |  2 +-
 pandas/core/arrays/sparse/array.py            |  5 +--
 pandas/core/arrays/timedeltas.py              |  8 ++--
 pandas/core/common.py                         |  2 +-
 pandas/core/computation/eval.py               |  2 +-
 pandas/core/construction.py                   |  2 +-
 pandas/core/dtypes/cast.py                    | 18 ++++----
 pandas/core/dtypes/common.py                  |  5 +--
 pandas/core/dtypes/dtypes.py                  |  2 +-
 pandas/core/generic.py                        | 14 +++----
 pandas/core/groupby/generic.py                |  2 +-
 pandas/core/groupby/groupby.py                |  2 +-
 pandas/core/groupby/grouper.py                |  2 +-
 pandas/core/groupby/ops.py                    |  8 ++--
 pandas/core/indexes/base.py                   | 17 ++++----
 pandas/core/indexes/datetimes.py              |  2 +-
 pandas/core/indexes/interval.py               |  6 +--
 pandas/core/indexes/multi.py                  | 14 +++----
 pandas/core/indexes/numeric.py                | 10 ++---
 pandas/core/internals/blocks.py               |  4 +-
 pandas/core/internals/construction.py         |  6 +--
 pandas/core/ops/array_ops.py                  |  3 +-
 pandas/core/resample.py                       |  2 +-
 pandas/core/reshape/encoding.py               |  2 +-
 pandas/core/reshape/merge.py                  | 15 +++----
 pandas/core/reshape/tile.py                   |  7 ++--
 pandas/core/series.py                         |  2 +-
 pandas/core/tools/datetimes.py                |  4 +-
 pandas/core/tools/timedeltas.py               |  2 +-
 pandas/core/tools/times.py                    |  2 +-
 pandas/core/window/ewm.py                     |  7 ++--
 pandas/core/window/rolling.py                 | 41 +++++++++----------
 pandas/io/excel/_base.py                      |  8 ++--
 pandas/io/formats/csvs.py                     |  3 +-
 pandas/io/formats/excel.py                    |  3 +-
 pandas/io/formats/style.py                    |  6 +--
 pandas/io/formats/style_render.py             |  2 +-
 pandas/io/json/_normalize.py                  |  2 +-
 pandas/io/parquet.py                          |  2 +-
 pandas/io/parsers/python_parser.py            |  2 +-
 pandas/io/parsers/readers.py                  |  7 ++--
 pandas/io/pytables.py                         |  4 +-
 pandas/io/spss.py                             |  3 +-
 pandas/io/sql.py                              | 13 +++---
 pandas/io/xml.py                              | 10 ++---
 pandas/plotting/_core.py                      |  6 +--
 pandas/plotting/_matplotlib/core.py           | 23 +++++------
 pandas/plotting/_matplotlib/misc.py           |  2 +-
 .../tests/groupby/aggregate/test_aggregate.py |  3 +-
 pandas/tests/groupby/test_filters.py          |  3 +-
 pandas/tests/groupby/test_groupby.py          |  3 +-
 pandas/util/_decorators.py                    |  3 +-
 pandas/util/_validators.py                    |  2 +-
 pyproject.toml                                |  1 -
 64 files changed, 167 insertions(+), 203 deletions(-)

diff --git a/doc/make.py b/doc/make.py
index c758c7fc84bbb..f5bf170c6274d 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -259,8 +259,7 @@ def latex(self, force=False):
                         "You should check the file "
                         '"build/latex/pandas.pdf" for problems.'
) - else: - self._run_os("make") + self._run_os("make") return ret_code def latex_forced(self): diff --git a/pandas/_testing/_io.py b/pandas/_testing/_io.py index bef2730ec630b..527e8c1d0d090 100644 --- a/pandas/_testing/_io.py +++ b/pandas/_testing/_io.py @@ -243,10 +243,7 @@ def wrapper(*args, **kwargs): if not isinstance(err, error_classes) or raise_on_error: raise - else: - pytest.skip( - f"Skipping test due to lack of connectivity and error {err}" - ) + pytest.skip(f"Skipping test due to lack of connectivity and error {err}") return wrapper diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index 6c7753e09184f..699d1b565fc71 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -141,8 +141,7 @@ def import_optional_dependency( except ImportError: if errors == "raise": raise ImportError(msg) - else: - return None + return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index 6dc4a66f34710..b02dfac1400d1 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -412,8 +412,7 @@ def validate_resampler_func(method: str, args, kwargs) -> None: "numpy operations are not valid with resample. " f"Use .resample(...).{method}() instead" ) - else: - raise TypeError("too many arguments passed in") + raise TypeError("too many arguments passed in") def validate_minmax_axis(axis: AxisInt | None, ndim: int = 1) -> None: diff --git a/pandas/conftest.py b/pandas/conftest.py index 61915b4070945..4639799d2ee03 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1163,8 +1163,7 @@ def deco(*args): raise ValueError( f"Could not find file {path} and --strict-data-files is set." 
) - else: - pytest.skip(f"Could not find {path}.") + pytest.skip(f"Could not find {path}.") return path return deco diff --git a/pandas/core/apply.py b/pandas/core/apply.py index e8078dd4a28a4..b93a2834b9fa0 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -464,7 +464,7 @@ def apply_str(self) -> DataFrame | Series: "axis" not in arg_names or f in ("corrwith", "skew") ): raise ValueError(f"Operation {f} does not support axis=1") - elif "axis" in arg_names: + if "axis" in arg_names: self.kwargs["axis"] = self.axis return self._try_aggregate_string_function(obj, f, *self.args, **self.kwargs) @@ -762,7 +762,7 @@ def apply_broadcast(self, target: DataFrame) -> DataFrame: # must be a scalar or 1d if ares > 1: raise ValueError("too many dims to broadcast") - elif ares == 1: + if ares == 1: # must match return dim if result_compare != len(res): @@ -1179,7 +1179,7 @@ def reconstruct_func( "Function names must be unique if there is no new column names " "assigned" ) - elif func is None: + if func is None: # nicer error message raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).") diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index b4198575c3f06..b28f1908feb9f 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -2264,7 +2264,7 @@ def validate_inferred_freq( "values does not conform to passed frequency " f"{freq.freqstr}" ) - elif freq is None: + if freq is None: freq = inferred_freq freq_infer = False diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 43acaafdd65cc..fcd6fbb1b5433 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -2368,7 +2368,7 @@ def validate_tz_from_dtype( if dtz is not None: if tz is not None and not timezones.tz_compare(tz, dtz): raise ValueError("cannot supply both a tz and a dtype with a tz") - elif explicit_tz_none: + if explicit_tz_none: raise ValueError("Cannot pass both a timezone-aware dtype and tz=None") tz = dtz diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 5e888f4babc95..77e2fdac26da9 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -314,17 +314,17 @@ def _simple_new( f"right [{type(right).__name__}] types" ) raise ValueError(msg) - elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype): + if is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype): # GH 19016 msg = ( "category, object, and string subtypes are not supported " "for IntervalArray" ) raise TypeError(msg) - elif isinstance(left, ABCPeriodIndex): + if isinstance(left, ABCPeriodIndex): msg = "Period dtypes are not supported, use a PeriodIndex instead" raise ValueError(msg) - elif isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz): + if isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz): msg = ( "left and right must have the same time zone, got " f"'{left.tz}' and '{right.tz}'" @@ -1321,7 +1321,7 @@ def mid(self) -> Index: def overlaps(self, other): if isinstance(other, (IntervalArray, ABCIntervalIndex)): raise NotImplementedError - elif not isinstance(other, Interval): + if not isinstance(other, Interval): msg = f"`other` must be Interval-like, got {type(other).__name__}" raise TypeError(msg) diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 746175ee3374d..2727d5c82ac83 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -655,7 +655,7 @@ def 
_arith_method(self, other, op): raise NotImplementedError( f"operator '{op_name}' not implemented for bool dtypes" ) - elif op_name in {"mod", "rmod"}: + if op_name in {"mod", "rmod"}: dtype = "int8" else: dtype = "bool" @@ -1034,7 +1034,7 @@ def _quantile( # I think this should be out_mask=self.isna().all(axis=1) # but am holding off until we have tests raise NotImplementedError - elif self.isna().all(): + if self.isna().all(): out_mask = np.ones(res.shape, dtype=bool) else: out_mask = np.zeros(res.shape, dtype=bool) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 3b21cc1ecff48..f7808a729fa0a 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -1167,7 +1167,7 @@ def _make_field_arrays(*fields) -> list[np.ndarray]: if isinstance(x, (list, np.ndarray, ABCSeries)): if length is not None and len(x) != length: raise ValueError("Mismatched Period array lengths") - elif length is None: + if length is None: length = len(x) # error: Argument 2 to "repeat" has incompatible type "Optional[int]"; expected diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index d167037fa3015..93d6ac0ef6e06 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -781,7 +781,7 @@ def fillna( ): raise ValueError("Must specify one of 'method' or 'value'.") - elif method is not None: + if method is not None: msg = "fillna with 'method' requires high memory usage." warnings.warn( msg, @@ -1172,8 +1172,7 @@ def _take_without_fill(self: SparseArrayT, indices) -> SparseArrayT: if (indices.max() >= n) or (indices.min() < -n): if n == 0: raise IndexError("cannot do a non-empty take from an empty axes.") - else: - raise IndexError("out of bounds value in 'indices'.") + raise IndexError("out of bounds value in 'indices'.") if to_shift.any(): indices = indices.copy() diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 3c1f7ccc28ff7..65996b1df5e9a 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -490,7 +490,7 @@ def __truediv__(self, other): if len(other) != len(self): raise ValueError("Cannot divide vectors with unequal lengths") - elif is_timedelta64_dtype(other.dtype): + if is_timedelta64_dtype(other.dtype): # let numpy handle it return self._ndarray / other @@ -554,7 +554,7 @@ def __rtruediv__(self, other): if len(other) != len(self): raise ValueError("Cannot divide vectors with unequal lengths") - elif is_timedelta64_dtype(other.dtype): + if is_timedelta64_dtype(other.dtype): # let numpy handle it return other / self._ndarray @@ -606,7 +606,7 @@ def __floordiv__(self, other): if len(other) != len(self): raise ValueError("Cannot divide with unequal lengths") - elif is_timedelta64_dtype(other.dtype): + if is_timedelta64_dtype(other.dtype): other = type(self)(other) # numpy timedelta64 does not natively support floordiv, so operate @@ -675,7 +675,7 @@ def __rfloordiv__(self, other): if len(other) != len(self): raise ValueError("Cannot divide with unequal lengths") - elif is_timedelta64_dtype(other.dtype): + if is_timedelta64_dtype(other.dtype): other = type(self)(other) # numpy timedelta64 does not natively support floordiv, so operate # on the i8 values diff --git a/pandas/core/common.py b/pandas/core/common.py index 02297855ad389..c73c31c2a103b 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -388,7 +388,7 @@ def standardize_mapping(into): into = type(into) if not issubclass(into, abc.Mapping): raise 
TypeError(f"unsupported type: {into}") - elif into == defaultdict: + if into == defaultdict: raise TypeError("to_dict() only accepts initialized defaultdicts") return into diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index bc8c37b9273ce..f0127ae05182a 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -344,7 +344,7 @@ def eval( "Multi-line expressions are only valid " "if all expressions contain an assignment" ) - elif inplace: + if inplace: raise ValueError("Cannot operate inplace if there is no assignment") # assign if needed diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 99acb511ca34d..997611d7860db 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -698,7 +698,7 @@ def _sanitize_ndim( if getattr(result, "ndim", 0) == 0: raise ValueError("result should be arraylike with ndim > 0") - elif result.ndim == 1: + if result.ndim == 1: # the result that we want result = _maybe_repeat(result, index) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index a75448347233c..31fb0bb24dec0 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1866,7 +1866,7 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: return element raise LossySetitemError - elif is_integer(element) or (is_float(element) and element.is_integer()): + if is_integer(element) or (is_float(element) and element.is_integer()): # e.g. test_setitem_series_int8 if we have a python int 1 # tipo may be np.int32, despite the fact that it will fit # in smaller int dtypes. @@ -1893,7 +1893,7 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: # Anything other than integer we cannot hold raise LossySetitemError - elif ( + if ( dtype.kind == "u" and isinstance(element, np.ndarray) and element.dtype.kind == "i" @@ -1905,9 +1905,9 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: # itemsize issues there? return casted raise LossySetitemError - elif dtype.itemsize < tipo.itemsize: + if dtype.itemsize < tipo.itemsize: raise LossySetitemError - elif not isinstance(tipo, np.dtype): + if not isinstance(tipo, np.dtype): # i.e. nullable IntegerDtype; we can put this into an ndarray # losslessly iff it has no NAs if element._hasna: @@ -1918,7 +1918,7 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: raise LossySetitemError - elif dtype.kind == "f": + if dtype.kind == "f": if lib.is_integer(element) or lib.is_float(element): casted = dtype.type(element) if np.isnan(casted) or casted == element: @@ -1931,7 +1931,7 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: if tipo.kind not in ["f", "i", "u"]: # Anything other than float/integer we cannot hold raise LossySetitemError - elif not isinstance(tipo, np.dtype): + if not isinstance(tipo, np.dtype): # i.e. 
nullable IntegerDtype or FloatingDtype; # we can put this into an ndarray losslessly iff it has no NAs if element._hasna: @@ -1950,7 +1950,7 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: raise LossySetitemError - elif dtype.kind == "c": + if dtype.kind == "c": if lib.is_integer(element) or lib.is_complex(element) or lib.is_float(element): if np.isnan(element): # see test_where_complex GH#6345 @@ -1968,7 +1968,7 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: raise LossySetitemError raise LossySetitemError - elif dtype.kind == "b": + if dtype.kind == "b": if tipo is not None: if tipo.kind == "b": if not isinstance(tipo, np.dtype): @@ -1982,7 +1982,7 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: return element raise LossySetitemError - elif dtype.kind == "S": + if dtype.kind == "S": # TODO: test tests.frame.methods.test_replace tests get here, # need more targeted tests. xref phofl has a PR about this if tipo is not None: diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index a7b8e720ad8e2..e9d3721bbb5f5 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1471,7 +1471,7 @@ def get_dtype(arr_or_dtype) -> DtypeObj: raise TypeError("Cannot deduce dtype from null object") # fastpath - elif isinstance(arr_or_dtype, np.dtype): + if isinstance(arr_or_dtype, np.dtype): return arr_or_dtype elif isinstance(arr_or_dtype, type): return np.dtype(arr_or_dtype) @@ -1639,8 +1639,7 @@ def validate_all_hashable(*args, error_name: str | None = None) -> None: if not all(is_hashable(arg) for arg in args): if error_name: raise TypeError(f"{error_name} must be a hashable type") - else: - raise TypeError("All elements must be hashable") + raise TypeError("All elements must be hashable") def pandas_dtype(dtype) -> DtypeObj: diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index e57e11f1bd2bd..ba63ba2638c2d 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -523,7 +523,7 @@ def validate_categories(categories, fastpath: bool = False) -> Index: raise TypeError( f"Parameter 'categories' must be list-like, was {repr(categories)}" ) - elif not isinstance(categories, ABCIndex): + if not isinstance(categories, ABCIndex): categories = Index._with_infer(categories, tupleize_cols=False) if not fastpath: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f88fe35360074..b7fab924d7988 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -993,7 +993,7 @@ def _rename( raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) - elif mapper is not None: + if mapper is not None: raise TypeError( "Cannot specify both 'mapper' and any of 'index' or 'columns'" ) @@ -4125,7 +4125,7 @@ def _check_setitem_copy(self, t: str = "setting", force: bool_t = False): if value == "raise": raise SettingWithCopyError(t) - elif value == "warn": + if value == "warn": warnings.warn(t, SettingWithCopyWarning, stacklevel=find_stack_level()) def __delitem__(self, key) -> None: @@ -8355,7 +8355,7 @@ def between_time( ) # If any of the deprecated arguments ('include_start', 'include_end') # have been passed - elif old_include_arg_used: + if old_include_arg_used: warnings.warn( "`include_start` and `include_end` are deprecated in " "favour of `inclusive`.", @@ -9595,7 +9595,7 @@ def _where( if axis is None and not other._indexed_same(self): raise InvalidIndexError - elif other.ndim < self.ndim: + if other.ndim < self.ndim: # TODO(EA2D): avoid 
object-dtype cast in EA case GH#38729 other = other._values if axis == 0: @@ -10265,8 +10265,7 @@ def _tz_convert(ax, tz): raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) - else: - ax = DatetimeIndex([], tz=tz) + ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax @@ -10435,8 +10434,7 @@ def _tz_localize(ax, tz, ambiguous, nonexistent): raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) - else: - ax = DatetimeIndex([], tz=tz) + ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index cea9aaf70ccd0..6f0562725c9b5 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -306,7 +306,7 @@ def _aggregate_multiple_funcs(self, arg) -> DataFrame: # GH 15931 raise SpecificationError("nested renamer is not supported") - elif any(isinstance(x, (tuple, list)) for x in arg): + if any(isinstance(x, (tuple, list)) for x in arg): arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg] # indicated column order diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 369fd37bf3a92..edda5492aca6d 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1759,7 +1759,7 @@ def _cython_agg_general( raise NotImplementedError( f"{type(self).__name__}.{how} does not implement {kwd_name}." ) - elif not is_ser: + if not is_ser: data = data.get_numeric_data(copy=False) def array_func(values: ArrayLike) -> ArrayLike: diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 175af95867c8e..1cc5e90f9a3a4 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -901,7 +901,7 @@ def is_in_obj(gpr) -> bool: if len(groupings) == 0 and len(obj): raise ValueError("No group keys passed!") - elif len(groupings) == 0: + if len(groupings) == 0: groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp))) # create the internals grouper diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index bf3f74330e8cb..c20fe34a178f5 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -169,7 +169,7 @@ def _get_cython_function( f"function is not implemented for this dtype: " f"[how->{how},dtype->{dtype_str}]" ) - elif "object" not in f.__signatures__: + if "object" not in f.__signatures__: # raise NotImplementedError here rather than TypeError later raise NotImplementedError( f"function is not implemented for this dtype: " @@ -241,10 +241,10 @@ def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False): # non-cython implementation. if how in ["sum", "prod", "cumsum", "cumprod"]: raise TypeError(f"{dtype} type does not support {how} operations") - elif how not in ["rank"]: + if how not in ["rank"]: # only "rank" is implemented in cython raise NotImplementedError(f"{dtype} dtype not supported") - elif not dtype.ordered: + if not dtype.ordered: # TODO: TypeError? 
raise NotImplementedError(f"{dtype} dtype not supported") @@ -628,7 +628,7 @@ def cython_operation( """ if values.ndim > 2: raise NotImplementedError("number of dimensions is currently limited to 2") - elif values.ndim == 2: + if values.ndim == 2: assert axis == 1, axis elif not is_1d_only_ea_dtype(values.dtype): # Note: it is *not* the case that axis is always 0 for 1-dim values, diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 72391ca3282c9..c6425f007ed05 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1620,7 +1620,7 @@ def _validate_names( if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") - elif names is None and name is None: + if names is None and name is None: new_names = deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): @@ -1801,16 +1801,16 @@ def set_names( if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError("Level must be None for non-MultiIndex") - elif level is not None and not is_list_like(level) and is_list_like(names): + if level is not None and not is_list_like(level) and is_list_like(names): raise TypeError("Names must be a string when a single level is provided.") - elif not is_list_like(names) and level is None and self.nlevels > 1: + if not is_list_like(names) and level is None and self.nlevels > 1: raise TypeError("Must pass list-like as `names`.") - elif is_dict_like(names) and not isinstance(self, ABCMultiIndex): + if is_dict_like(names) and not isinstance(self, ABCMultiIndex): raise TypeError("Can only pass dict-like as `names` for MultiIndex.") - elif is_dict_like(names) and level is not None: + if is_dict_like(names) and level is not None: raise TypeError("Can not pass level for dictlike `names`.") if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None: @@ -1919,7 +1919,7 @@ def _validate_index_level(self, level) -> None: "Too many levels: Index has only 1 level, " f"{level} is not a valid level number" ) - elif level > 0: + if level > 0: raise IndexError( f"Too many levels: Index has only 1 level, not {level + 1}" ) @@ -3847,7 +3847,7 @@ def _check_indexing_method( "method='nearest' not implemented yet " "for MultiIndex; see GitHub issue 9365" ) - elif method in ("pad", "backfill"): + if method in ("pad", "backfill"): if tolerance is not None: raise NotImplementedError( "tolerance not implemented yet for MultiIndex" @@ -4149,8 +4149,7 @@ def _raise_invalid_indexer( ) if reraise is not lib.no_default: raise TypeError(msg) from reraise - else: - raise TypeError(msg) + raise TypeError(msg) # -------------------------------------------------------------------- # Reindex Methods diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 73e25f9fe2f06..f4eb67611b0e7 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -999,7 +999,7 @@ def date_range( "Deprecated argument `closed` cannot be passed" "if argument `inclusive` is not None" ) - elif closed is not lib.no_default: + if closed is not lib.no_default: warnings.warn( "Argument `closed` is deprecated in favor of `inclusive`.", FutureWarning, diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index fa10aee4b6c72..4a24322d330f3 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -660,7 +660,7 @@ def get_loc( matches = mask.sum() if matches == 0: raise KeyError(key) - elif matches == 1: + 
if matches == 1: return mask.argmax() res = lib.maybe_booleans_to_slice(mask.view("u1")) @@ -786,7 +786,7 @@ def _convert_slice_indexer(self, key: slice, kind: str, is_frame: bool = False): msg = "label-based slicing with step!=1 is not supported for IntervalIndex" if kind == "loc": raise ValueError(msg) - elif kind == "getitem": + if kind == "getitem": if not is_valid_positional_slice(key): # i.e. this cannot be interpreted as a positional slice raise ValueError(msg) @@ -1075,7 +1075,7 @@ def interval_range( if not _is_valid_endpoint(start): raise ValueError(f"start must be numeric or datetime-like, got {start}") - elif not _is_valid_endpoint(end): + if not _is_valid_endpoint(end): raise ValueError(f"end must be numeric or datetime-like, got {end}") if is_float(periods): diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 5cde5dbda7ae0..d37a2ef3cb1af 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -204,7 +204,7 @@ def names_compat(meth: F) -> F: def new_meth(self_or_cls, *args, **kwargs): if "name" in kwargs and "names" in kwargs: raise TypeError("Can only provide one of `names` and `name`") - elif "name" in kwargs: + if "name" in kwargs: kwargs["names"] = kwargs.pop("name") return meth(self_or_cls, *args, **kwargs) @@ -482,7 +482,7 @@ def from_arrays( error_msg = "Input must be a list / sequence of array-likes." if not is_list_like(arrays): raise TypeError(error_msg) - elif is_iterator(arrays): + if is_iterator(arrays): arrays = list(arrays) # Check if elements of array are list-like @@ -553,7 +553,7 @@ def from_tuples( """ if not is_list_like(tuples): raise TypeError("Input must be a list / sequence of tuple-likes.") - elif is_iterator(tuples): + if is_iterator(tuples): tuples = list(tuples) tuples = cast(Collection[Tuple[Hashable, ...]], tuples) @@ -642,7 +642,7 @@ def from_product( if not is_list_like(iterables): raise TypeError("Input must be a list / sequence of iterables.") - elif is_iterator(iterables): + if is_iterator(iterables): iterables = list(iterables) codes, levels = factorize_from_iterables(iterables) @@ -1499,7 +1499,7 @@ def _get_level_number(self, level) -> int: except ValueError as err: if not is_integer(level): raise KeyError(f"Level {level} not found") from err - elif level < 0: + if level < 0: level += self.nlevels if level < 0: orig_level = level - self.nlevels @@ -3653,12 +3653,12 @@ def astype(self, dtype, copy: bool = True): if is_categorical_dtype(dtype): msg = "> 1 ndim Categorical are not supported at this time" raise NotImplementedError(msg) - elif not is_object_dtype(dtype): + if not is_object_dtype(dtype): raise TypeError( "Setting a MultiIndex dtype to anything other than object " "is not supported" ) - elif copy is True: + if copy is True: return self._view() return self diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 648dca5d1b58b..816a1752c5bf0 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -254,11 +254,11 @@ def _convert_tolerance(self, tolerance, target): f"tolerance argument for {type(self).__name__} must contain " "numeric elements if it is list type" ) - else: - raise ValueError( - f"tolerance argument for {type(self).__name__} must be numeric " - f"if it is a scalar: {repr(tolerance)}" - ) + + raise ValueError( + f"tolerance argument for {type(self).__name__} must be numeric " + f"if it is a scalar: {repr(tolerance)}" + ) return tolerance @classmethod diff --git a/pandas/core/internals/blocks.py 
b/pandas/core/internals/blocks.py index 4cc8a12de6821..35a54e04f232d 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1655,7 +1655,7 @@ def iget(self, i: int | tuple[int, int] | tuple[slice, int]): col, loc = i if not com.is_null_slice(col) and col != 0: raise IndexError(f"{self} only contains one item") - elif isinstance(col, slice): + if isinstance(col, slice): # the is_null_slice check above assures that col is slice(None) # so what we want is a view on all our columns and row loc if loc < 0: @@ -2162,7 +2162,7 @@ def check_ndim(values, placement: BlockPlacement, ndim: int) -> None: f"values.ndim > ndim [{values.ndim} > {ndim}]" ) - elif not is_1d_only_ea_dtype(values.dtype): + if not is_1d_only_ea_dtype(values.dtype): # TODO(EA2D): special case not needed with 2D EAs if values.ndim != ndim: raise ValueError( diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 28eab57ac7bde..f7c8a0b91c3f4 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -652,7 +652,7 @@ def _extract_index(data) -> Index: if not indexes and not raw_lengths: raise ValueError("If using all scalar values, you must pass an index") - elif have_series: + if have_series: index = union_indexes(indexes) elif have_dicts: index = union_indexes(indexes, sort=False) @@ -1015,7 +1015,7 @@ def _validate_or_indexify_columns( f"{len(columns)} columns passed, passed data had " f"{len(content)} columns" ) - elif is_mi_list: + if is_mi_list: # check if nested list column, length of each sub-list should be equal if len({len(col) for col in columns}) > 1: @@ -1024,7 +1024,7 @@ def _validate_or_indexify_columns( ) # if columns is not empty and length of sublist is not equal to content - elif columns and len(columns[0]) != len(content): + if columns and len(columns[0]) != len(content): raise ValueError( f"{len(columns[0])} columns passed, passed data had " f"{len(content)} columns" diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 4a3707bbf1070..bc05e9a3d7c3f 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -97,8 +97,7 @@ def _masked_arith_op(x: np.ndarray, y, op): if len(x) != len(y): raise ValueError(x.shape, y.shape) - else: - ymask = notna(y) + ymask = notna(y) # NB: ravel() is only safe since y is ndarray; for e.g. 
PeriodIndex # we would get int64 dtype, see GH#19956 diff --git a/pandas/core/resample.py b/pandas/core/resample.py index e32e89b705cc0..f0ffd694ff953 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1947,7 +1947,7 @@ def _get_timestamp_range_edges( index_tz = first.tz if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None): raise ValueError("The origin must have the same timezone as the index.") - elif origin == "epoch": + if origin == "epoch": # set the epoch based on the timezone to have similar bins results when # resampling on the same kind of indexes on different timezones origin = Timestamp("1970-01-01", tz=index_tz) diff --git a/pandas/core/reshape/encoding.py b/pandas/core/reshape/encoding.py index a39e3c1f10956..a3c5e588f61a9 100644 --- a/pandas/core/reshape/encoding.py +++ b/pandas/core/reshape/encoding.py @@ -508,7 +508,7 @@ def from_dummies( "Dummy DataFrame contains multi-assignment(s); " f"First instance in row: {assigned.idxmax()}" ) - elif any(assigned == 0): + if any(assigned == 0): if isinstance(default_category, dict): cats.append(default_category[prefix]) else: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 3f98ab16c6797..8b87cbbba9ac7 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -306,7 +306,7 @@ def _merger(x, y) -> DataFrame: if left_by is not None and right_by is not None: raise ValueError("Can only group either left or right frames") - elif left_by is not None: + if left_by is not None: if isinstance(left_by, str): left_by = [left_by] check = set(left_by).difference(left.columns) @@ -1542,11 +1542,11 @@ def _validate(self, validate: str) -> None: "Merge keys are not unique in either left " "or right dataset; not a one-to-one merge" ) - elif not left_unique: + if not left_unique: raise MergeError( "Merge keys are not unique in left dataset; not a one-to-one merge" ) - elif not right_unique: + if not right_unique: raise MergeError( "Merge keys are not unique in right dataset; not a one-to-one merge" ) @@ -2073,15 +2073,13 @@ def injection(obj): side = "left" if isna(left_values).any(): raise ValueError(f"Merge keys contain null values on {side} side") - else: - raise ValueError(f"{side} keys must be sorted") + raise ValueError(f"{side} keys must be sorted") if not Index(right_values).is_monotonic_increasing: side = "right" if isna(right_values).any(): raise ValueError(f"Merge keys contain null values on {side} side") - else: - raise ValueError(f"{side} keys must be sorted") + raise ValueError(f"{side} keys must be sorted") # initial type conversion as needed if needs_i8_conversion(left_values): @@ -2465,8 +2463,7 @@ def _validate_operand(obj: DataFrame | Series) -> DataFrame: elif isinstance(obj, ABCSeries): if obj.name is None: raise ValueError("Cannot merge a Series without a name") - else: - return obj.to_frame() + return obj.to_frame() else: raise TypeError( f"Can only merge Series or DataFrame objects, a {type(obj)} was passed" diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 3372fa64d86fe..243a7c547bbb5 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -263,7 +263,7 @@ def cut( raise ValueError( "cannot specify integer `bins` when input data contains infinity" ) - elif mn == mx: # adjust end points before binning + if mn == mx: # adjust end points before binning mn -= 0.001 * abs(mn) if mn != 0 else 0.001 mx += 0.001 * abs(mx) if mx != 0 else 0.001 bins = np.linspace(mn, mx, bins + 1, 
endpoint=True) @@ -421,8 +421,7 @@ def _bins_to_cuts( f"Bin edges must be unique: {repr(bins)}.\n" f"You can drop duplicate edges by setting the 'duplicates' kwarg" ) - else: - bins = unique_bins + bins = unique_bins side: Literal["left", "right"] = "left" if right else "right" ids = ensure_platform_int(bins.searchsorted(x, side=side)) @@ -440,7 +439,7 @@ def _bins_to_cuts( "list-like argument" ) - elif labels is None: + if labels is None: labels = _format_labels( bins, precision, right=right, include_lowest=include_lowest, dtype=dtype ) diff --git a/pandas/core/series.py b/pandas/core/series.py index 9f05eba00b05c..a5a49265304e4 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -398,7 +398,7 @@ def __init__( raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) - elif isinstance(data, Index): + if isinstance(data, Index): if dtype is not None: # astype copies diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index ac49cd4cd4330..bb2f663dedb33 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -474,7 +474,7 @@ def _array_strptime_with_fallback( except OutOfBoundsDatetime: if errors == "raise": raise - elif errors == "coerce": + if errors == "coerce": result = np.empty(arg.shape, dtype="M8[ns]") iresult = result.view("i8") iresult.fill(iNaT) @@ -487,7 +487,7 @@ def _array_strptime_with_fallback( if not infer_datetime_format: if errors == "raise": raise - elif errors == "coerce": + if errors == "coerce": result = np.empty(arg.shape, dtype="M8[ns]") iresult = result.view("i8") iresult.fill(iNaT) diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index 705c77090e168..784549b53bc32 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -222,7 +222,7 @@ def _coerce_scalar_to_timedelta_type( except ValueError: if errors == "raise": raise - elif errors == "ignore": + if errors == "ignore": return r # coerce diff --git a/pandas/core/tools/times.py b/pandas/core/tools/times.py index d0a4342254e16..edb6b97ad2e53 100644 --- a/pandas/core/tools/times.py +++ b/pandas/core/tools/times.py @@ -77,7 +77,7 @@ def _convert_listlike(arg, format): f"format {format}" ) raise ValueError(msg) from err - elif errors == "ignore": + if errors == "ignore": return arg else: times.append(None) diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index a5ee93aded420..c5c401d415ad0 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -1005,10 +1005,9 @@ def mean(self, *args, update=None, update_times=None, **kwargs): is_frame = self._selected_obj.ndim == 2 if update_times is not None: raise NotImplementedError("update_times is not implemented.") - else: - update_deltas = np.ones( - max(self._selected_obj.shape[self.axis - 1] - 1, 0), dtype=np.float64 - ) + update_deltas = np.ones( + max(self._selected_obj.shape[self.axis - 1] - 1, 0), dtype=np.float64 + ) if update is not None: if self._mean.last_ewm is None: raise ValueError( diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 4ac09a7149857..08c1285b24d23 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -171,9 +171,9 @@ def _validate(self) -> None: if self.min_periods is not None: if not is_integer(self.min_periods): raise ValueError("min_periods must be an integer") - elif self.min_periods < 0: + if self.min_periods < 0: raise ValueError("min_periods must be >= 0") - elif is_integer(self.window) and 
self.min_periods > self.window: + if is_integer(self.window) and self.min_periods > self.window: raise ValueError( f"min_periods {self.min_periods} must be <= window {self.window}" ) @@ -205,7 +205,7 @@ def _validate(self) -> None: if self.step is not None: if not is_integer(self.step): raise ValueError("step must be an integer") - elif self.step < 0: + if self.step < 0: raise ValueError("step must be >= 0") def _check_window_bounds( @@ -216,7 +216,7 @@ def _check_window_bounds( f"start ({len(start)}) and end ({len(end)}) bounds must be the " f"same length" ) - elif len(start) != (num_vals + (self.step or 1) - 1) // (self.step or 1): + if len(start) != (num_vals + (self.step or 1) - 1) // (self.step or 1): raise ValueError( f"start and end bounds ({len(start)}) must be the same length " f"as the object ({num_vals}) divided by the step ({self.step}) " @@ -363,16 +363,15 @@ def _prep_values(self, values: ArrayLike) -> np.ndarray: f"ops for {type(self).__name__} for this " f"dtype {values.dtype} are not implemented" ) - else: - # GH #12373 : rolling functions error on float32 data - # make sure the data is coerced to float64 - try: - if isinstance(values, ExtensionArray): - values = values.to_numpy(np.float64, na_value=np.nan) - else: - values = ensure_float64(values) - except (ValueError, TypeError) as err: - raise TypeError(f"cannot handle this type -> {values.dtype}") from err + # GH #12373 : rolling functions error on float32 data + # make sure the data is coerced to float64 + try: + if isinstance(values, ExtensionArray): + values = values.to_numpy(np.float64, na_value=np.nan) + else: + values = ensure_float64(values) + except (ValueError, TypeError) as err: + raise TypeError(f"cannot handle this type -> {values.dtype}") from err # Convert inf to nan for C funcs inf = np.isinf(values) @@ -418,7 +417,7 @@ def _resolve_output(self, out: DataFrame, obj: DataFrame) -> DataFrame: """Validate and finalize result.""" if out.shape[1] == 0 and obj.shape[1] > 0: raise DataError("No numeric types to aggregate") - elif out.shape[1] == 0: + if out.shape[1] == 0: return obj.astype("float64") self._insert_on_column(out, obj) @@ -1141,7 +1140,7 @@ def _validate(self): raise NotImplementedError( "BaseIndexer subclasses not implemented with win_types." 
                 )
-            elif not is_integer(self.window) or self.window < 0:
+            if not is_integer(self.window) or self.window < 0:
                 raise ValueError("window must be an integer 0 or greater")
 
         if self.method != "single":
@@ -1559,10 +1558,9 @@ def std(
         if maybe_use_numba(engine):
             if self.method == "table":
                 raise NotImplementedError("std not supported with method='table'")
-            else:
-                from pandas.core._numba.kernels import sliding_var
+            from pandas.core._numba.kernels import sliding_var
 
-                return zsqrt(self._numba_apply(sliding_var, engine_kwargs, ddof))
+            return zsqrt(self._numba_apply(sliding_var, engine_kwargs, ddof))
         window_func = window_aggregations.roll_var
 
         def zsqrt_func(values, begin, end, min_periods):
@@ -1588,10 +1586,9 @@ def var(
         if maybe_use_numba(engine):
             if self.method == "table":
                 raise NotImplementedError("var not supported with method='table'")
-            else:
-                from pandas.core._numba.kernels import sliding_var
+            from pandas.core._numba.kernels import sliding_var
 
-                return self._numba_apply(sliding_var, engine_kwargs, ddof)
+            return self._numba_apply(sliding_var, engine_kwargs, ddof)
         window_func = partial(window_aggregations.roll_var, ddof=ddof)
         return self._apply(
             window_func,
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index d28162ce4d0be..6362e892f0012 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -1392,8 +1392,7 @@ def check_extension(cls, ext: str) -> Literal[True]:
             ext = ext[1:]
         if not any(ext in extension for extension in cls._supported_extensions):
             raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'")
-        else:
-            return True
+        return True
 
     # Allow use as a contextmanager
     def __enter__(self) -> ExcelWriter:
@@ -1462,9 +1461,8 @@ def inspect_excel_format(
         buf = stream.read(PEEK_SIZE)
         if buf is None:
             raise ValueError("stream is empty")
-        else:
-            assert isinstance(buf, bytes)
-            peek = buf
+        assert isinstance(buf, bytes)
+        peek = buf
         stream.seek(0)
 
         if any(peek.startswith(sig) for sig in XLS_SIGNATURES):
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 6ab57b0cce2a4..9f3d7d965f7c9 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -213,8 +213,7 @@ def write_cols(self) -> Sequence[Hashable]:
                 raise ValueError(
                     f"Writing {len(self.cols)} cols but got {len(self.header)} aliases"
                 )
-            else:
-                return self.header
+            return self.header
         else:
             # self.cols is an ndarray derived from Index._format_native_types,
             #  so its entries are strings, i.e. hashable
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 5c9b3a76123c4..a26b85390fd49 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -658,8 +658,7 @@ def _format_header_regular(self) -> Iterable[ExcelCell]:
                     f"Writing {len(self.columns)} cols "
                     f"but got {len(self.header)} aliases"
                 )
-            else:
-                colnames = self.header
+            colnames = self.header
 
         for colindex, colname in enumerate(colnames):
             yield CssExcelCell(
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 841def76a156f..cedfd4f58a272 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -3002,7 +3002,7 @@ def bar(  # pylint: disable=disallowed-name
 
         if not 0 <= width <= 100:
             raise ValueError(f"`width` must be a value in [0, 100], got {width}")
-        elif not 0 <= height <= 100:
+        if not 0 <= height <= 100:
             raise ValueError(f"`height` must be a value in [0, 100], got {height}")
 
         if subset is None:
@@ -3560,12 +3560,12 @@ def _validate_apply_axis_arg(
             f"'{arg_name}' is a Series but underlying data for operations "
             f"is a DataFrame since 'axis=None'"
         )
-    elif isinstance(arg, DataFrame) and isinstance(data, Series):
+    if isinstance(arg, DataFrame) and isinstance(data, Series):
         raise ValueError(
             f"'{arg_name}' is a DataFrame but underlying data for "
             f"operations is a Series with 'axis in [0,1]'"
         )
-    elif isinstance(arg, (Series, DataFrame)):  # align indx / cols to data
+    if isinstance(arg, (Series, DataFrame)):  # align indx / cols to data
         arg = arg.reindex_like(data, method=None).to_numpy(**dtype)
     else:
         arg = np.asarray(arg, **dtype)
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index 06e5702896f2f..0f93027f3f775 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -904,7 +904,7 @@ def concatenated_visible_rows(obj, n, row_indices):
             f"`clines` value of {clines} is invalid. Should either be None or one "
             f"of 'all;data', 'all;index', 'skip-last;data', 'skip-last;index'."
         )
-    elif clines is not None:
+    if clines is not None:
         data_len = len(row_body_cells) if "data" in clines and d["body"] else 0
 
         d["clines"] = defaultdict(list)
diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py
index 7ed7ce18e355b..3791dba6e36e3 100644
--- a/pandas/io/json/_normalize.py
+++ b/pandas/io/json/_normalize.py
@@ -403,7 +403,7 @@ def _pull_field(
                     f"Key {e} not found. If specifying a record_path, all elements of "
                     f"data should have the path."
                 ) from e
-            elif errors == "ignore":
+            if errors == "ignore":
                 return np.nan
             else:
                 raise KeyError(
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index df02a6fbca295..1c14722227124 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -305,7 +305,7 @@ def write(
                 "Cannot use both partition_on and "
                 "partition_cols. Use partition_cols for partitioning data"
             )
-        elif "partition_on" in kwargs:
+        if "partition_on" in kwargs:
             partition_cols = kwargs.pop("partition_on")
 
         if partition_cols is not None:
diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py
index 0fbc829d9772c..121c52ba1c323 100644
--- a/pandas/io/parsers/python_parser.py
+++ b/pandas/io/parsers/python_parser.py
@@ -756,7 +756,7 @@ def _alert_malformed(self, msg: str, row_num: int) -> None:
         """
         if self.on_bad_lines == self.BadLineHandleMethod.ERROR:
             raise ParserError(msg)
-        elif self.on_bad_lines == self.BadLineHandleMethod.WARN:
+        if self.on_bad_lines == self.BadLineHandleMethod.WARN:
             base = f"Skipping line {row_num}: "
             sys.stderr.write(base + msg + "\n")
 
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index f2b466b06e062..62e9e8a334588 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -1274,7 +1274,7 @@ def read_fwf(
     # Check input arguments.
     if colspecs is None and widths is None:
         raise ValueError("Must specify either colspecs or widths")
-    elif colspecs not in (None, "infer") and widths is not None:
+    if colspecs not in (None, "infer") and widths is not None:
         raise ValueError("You must specify only one of 'widths' and 'colspecs'")
 
     # Compute 'colspecs' from 'widths', if specified.
@@ -1391,11 +1391,10 @@ def _get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]:
                     f"The {repr(argname)} option is not supported with the "
                     f"'pyarrow' engine"
                 )
-            elif argname == "mangle_dupe_cols" and value is False:
+            if argname == "mangle_dupe_cols" and value is False:
                 # GH12935
                 raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
-            else:
-                options[argname] = value
+            options[argname] = value
 
         for argname, default in _c_parser_defaults.items():
             if argname in kwds:
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index cf341a70c3741..5c5e9501c111f 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -4964,14 +4964,14 @@ def _maybe_convert_for_string_atom(
 
         if inferred_type == "date":
             raise TypeError("[date] is not implemented as a table column")
-        elif inferred_type == "datetime":
+        if inferred_type == "datetime":
             # after GH#8260
             # this only would be hit for a multi-timezone dtype which is an error
             raise TypeError(
                 "too many timezones in this block, create separate data columns"
             )
-        elif not (inferred_type == "string" or dtype_name == "object"):
+        if not (inferred_type == "string" or dtype_name == "object"):
             return bvalues
 
     mask = isna(bvalues)
 
diff --git a/pandas/io/spss.py b/pandas/io/spss.py
index 1b83d339a2990..32efd6ca1180c 100644
--- a/pandas/io/spss.py
+++ b/pandas/io/spss.py
@@ -44,8 +44,7 @@ def read_spss(
     if usecols is not None:
         if not is_list_like(usecols):
             raise TypeError("usecols must be list-like.")
-        else:
-            usecols = list(usecols)  # pyreadstat requires a list
+        usecols = list(usecols)  # pyreadstat requires a list
 
     df, _ = pyreadstat.read_sav(
         stringify_path(path), usecols=usecols, apply_value_formats=convert_categoricals
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 8bcee36cbef2b..591fa25bd36d1 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -751,8 +751,7 @@ def pandasSQL_builder(con, schema: str | None = None) -> SQLDatabase | SQLiteDat
     if isinstance(con, str):
         if sqlalchemy is None:
             raise ImportError("Using URI string without sqlalchemy installed.")
-        else:
-            con = sqlalchemy.create_engine(con)
+        con = sqlalchemy.create_engine(con)
 
     if sqlalchemy is not None and isinstance(con, sqlalchemy.engine.Connectable):
         return SQLDatabase(con, schema=schema)
@@ -828,7 +827,7 @@ def create(self) -> None:
         if self.exists():
             if self.if_exists == "fail":
                 raise ValueError(f"Table '{self.name}' already exists.")
-            elif self.if_exists == "replace":
+            if self.if_exists == "replace":
                 self.pd_sql.drop_table(self.name, self.schema)
                 self._execute_create()
             elif self.if_exists == "append":
@@ -1038,8 +1037,7 @@ def _index_name(self, index, index_label):
                     "Length of 'index_label' should match number of "
                     f"levels, which is {nlevels}"
                 )
-            else:
-                return index_label
+            return index_label
         # return the used column labels for the index columns
         if (
             nlevels == 1
@@ -1332,8 +1330,7 @@ def insert_records(
             err_text = str(err.orig)
             if re.search(msg, err_text):
                 raise ValueError("inf cannot be used with MySQL") from err
-            else:
-                raise err
+            raise err
 
 
 def get_engine(engine: str) -> BaseEngine:
@@ -1362,7 +1359,7 @@ def get_engine(engine: str) -> BaseEngine:
             f"{error_msgs}"
         )
 
-    elif engine == "sqlalchemy":
+    if engine == "sqlalchemy":
         return SQLAlchemyEngine()
 
     raise ValueError("engine must be one of 'auto', 'sqlalchemy'")
diff --git a/pandas/io/xml.py b/pandas/io/xml.py
index 6432bba4c1a7c..c44972e9b1a4a 100644
--- a/pandas/io/xml.py
+++ b/pandas/io/xml.py
@@ -209,7 +209,7 @@ def _parse_nodes(self, elems: list[Any]) -> list[dict[str, str | None]]:
 
         if self.elems_only and self.attrs_only:
             raise ValueError("Either element or attributes can be parsed not both.")
-        elif self.elems_only:
+        if self.elems_only:
             if self.names:
                 dicts = [
                     {
@@ -482,9 +482,9 @@ def _validate_path(self) -> list[Any]:
             if elems is not None:
                 if self.elems_only and children == []:
                     raise ValueError(msg)
-                elif self.attrs_only and attrs == {}:
+                if self.attrs_only and attrs == {}:
                     raise ValueError(msg)
-                elif children == [] and attrs == {}:
+                if children == [] and attrs == {}:
                     raise ValueError(msg)
 
         except (KeyError, SyntaxError):
@@ -595,9 +595,9 @@ def _validate_path(self) -> list[Any]:
             if elems != []:
                 if self.elems_only and children == []:
                     raise ValueError(msg)
-                elif self.attrs_only and attrs == {}:
+                if self.attrs_only and attrs == {}:
                     raise ValueError(msg)
-                elif children == [] and attrs == {}:
+                if children == [] and attrs == {}:
                     raise ValueError(msg)
 
         return elems
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 35d743a64dd7b..84947c4cfa4fc 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -936,7 +936,7 @@ def __call__(self, *args, **kwargs):
                     raise ValueError(
                         f"{kind} requires either y column or 'subplots=True'"
                     )
-                elif y is not None:
+                if y is not None:
                     if is_integer(y) and not data.columns.holds_integer():
                         y = data.columns[y]
                     # converted to series actually. copy to not modify
@@ -1693,13 +1693,13 @@ def scatter(self, x, y, s=None, c=None, **kwargs) -> PlotAccessor:
         size = kwargs.pop("size", None)
         if s is not None and size is not None:
             raise TypeError("Specify exactly one of `s` and `size`")
-        elif s is not None or size is not None:
+        if s is not None or size is not None:
             kwargs["s"] = s if s is not None else size
 
         color = kwargs.pop("color", None)
         if c is not None and color is not None:
             raise TypeError("Specify exactly one of `c` and `color`")
-        elif c is not None or color is not None:
+        if c is not None or color is not None:
             kwargs["c"] = c if c is not None else color
 
         return self(kind="scatter", x=x, y=y, **kwargs)
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 605cc1d94e0af..70e5d1fbee929 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -257,7 +257,7 @@ def __init__(
         # Probably better to accept either.
         if "cmap" in kwds and colormap:
             raise TypeError("Only specify one of `cmap` and `colormap`.")
-        elif "cmap" in kwds:
+        if "cmap" in kwds:
             self.colormap = kwds.pop("cmap")
         else:
             self.colormap = colormap
@@ -358,16 +358,15 @@ def _validate_subplots_kwarg(
                 raise ValueError(
                     f"Column label(s) {list(bad_labels)} not found in the DataFrame."
                 )
-            else:
-                unique_columns = set(group)
-                duplicates = seen_columns.intersection(unique_columns)
-                if duplicates:
-                    raise ValueError(
-                        "Each column should be in only one subplot. "
-                        f"Columns {duplicates} were found in multiple subplots."
-                    )
-                seen_columns = seen_columns.union(unique_columns)
-                out.append(tuple(idx_locs))
+            unique_columns = set(group)
+            duplicates = seen_columns.intersection(unique_columns)
+            if duplicates:
+                raise ValueError(
+                    "Each column should be in only one subplot. "
+                    f"Columns {duplicates} were found in multiple subplots."
+                )
+            seen_columns = seen_columns.union(unique_columns)
+            out.append(tuple(idx_locs))
 
         unseen_columns = columns.difference(seen_columns)
         for column in unseen_columns:
@@ -1217,7 +1216,7 @@ def _make_plot(self):
         color = self.kwds.pop("color", None)
         if c is not None and color is not None:
             raise TypeError("Specify exactly one of `c` and `color`")
-        elif c is None and color is None:
+        if c is None and color is None:
             c_values = self.plt.rcParams["patch.facecolor"]
         elif color is not None:
             c_values = color
diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py
index b18264c655903..2f482c7d86571 100644
--- a/pandas/plotting/_matplotlib/misc.py
+++ b/pandas/plotting/_matplotlib/misc.py
@@ -387,7 +387,7 @@ def parallel_coordinates(
     elif xticks is not None:
         if not np.all(np.isreal(xticks)):
             raise ValueError("xticks specified must be numeric")
-        elif len(xticks) != ncols:
+        if len(xticks) != ncols:
             raise ValueError("Length of xticks must match number of columns")
         x = xticks
     else:
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 5bc323d82259e..8ffc49cd25915 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -308,8 +308,7 @@ def test_wrap_agg_out(three_group):
     def func(ser):
         if ser.dtype == object:
             raise TypeError
-        else:
-            return ser.sum()
+        return ser.sum()
 
     with tm.assert_produces_warning(FutureWarning, match="Dropping invalid columns"):
         result = grouped.aggregate(func)
diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py
index b40514568452c..c8aaf71fa419e 100644
--- a/pandas/tests/groupby/test_filters.py
+++ b/pandas/tests/groupby/test_filters.py
@@ -109,8 +109,7 @@ def test_filter_condition_raises():
     def raise_if_sum_is_zero(x):
         if x.sum() == 0:
             raise ValueError
-        else:
-            return x.sum() > 0
+        return x.sum() > 0
 
     s = Series([-1, 0, 1, 2])
     grouper = s.apply(lambda x: x % 2)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 9fdc0f02e8652..392910bd9e598 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1018,8 +1018,7 @@ def test_wrap_aggregated_output_multindex(mframe):
     def aggfun(ser):
         if ser.name == ("foo", "one"):
             raise TypeError
-        else:
-            return ser.sum()
+        return ser.sum()
 
     with tm.assert_produces_warning(FutureWarning, match="Dropping invalid columns"):
         agged2 = df.groupby(keys).aggregate(aggfun)
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 5d62f57126ef3..f18066769f214 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -206,8 +206,7 @@ def wrapper(*args, **kwargs) -> Callable[..., Any]:
                         f"or {repr(new_arg_name)}, not both."
                     )
                     raise TypeError(msg)
-                else:
-                    kwargs[new_arg_name] = new_arg_value
+                kwargs[new_arg_name] = new_arg_value
             return func(*args, **kwargs)
 
         return cast(F, wrapper)
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index 4594c0cb057df..646e05b08a8a9 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -386,7 +386,7 @@ def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool = Tru
 
     if value is None and method is None:
         raise ValueError("Must specify a fill 'value' or 'method'.")
-    elif value is None and method is not None:
+    if value is None and method is not None:
         method = clean_fill_method(method)
 
     elif value is not None and method is None:
diff --git a/pyproject.toml b/pyproject.toml
index 2e2e3fd914c86..b1ecc03993e73 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -110,7 +110,6 @@ disable = [
   "cyclic-import",
   "duplicate-code",
   "inconsistent-return-statements",
-  "no-else-raise",
   "no-else-return",
   "redefined-argument-from-local",
   "too-few-public-methods",

From 6de8a567a28438af1ab3d70f1f2fd58072eb25bf Mon Sep 17 00:00:00 2001
From: carlotta
Date: Fri, 4 Nov 2022 13:51:40 +0100
Subject: [PATCH 2/2] fix possible imbalanced tuple unpacking warning

---
 ci/fix_wheels.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/ci/fix_wheels.py b/ci/fix_wheels.py
index ed7957fac643b..525aacf572cd4 100644
--- a/ci/fix_wheels.py
+++ b/ci/fix_wheels.py
@@ -4,7 +4,12 @@
 import zipfile
 
 try:
-    _, wheel_path, dest_dir = sys.argv
+    if len(sys.argv) != 3:
+        raise ValueError(
+            "User must pass the path to the wheel and the destination directory."
+        )
+    wheel_path = sys.argv[1]
+    dest_dir = sys.argv[2]
     # Figure out whether we are building on 32 or 64 bit python
     is_32 = sys.maxsize <= 2**32
     PYTHON_ARCH = "x86" if is_32 else "x64"
@@ -50,5 +55,4 @@
 if not success:
     os.remove(repaired_wheel_path)
     raise exception
-else:
-    print(f"Successfully repaired wheel was written to {repaired_wheel_path}")
+print(f"Successfully repaired wheel was written to {repaired_wheel_path}")