diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 2073f110d536f..b1f98199f9fba 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -570,8 +570,7 @@ def __iter__(self):
             converted = ints_to_pydatetime(
                 data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
             )
-            for v in converted:
-                yield v
+            yield from converted
 
     def astype(self, dtype, copy=True):
         # We handle
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 853f7bb0b0d81..c88af77ea6189 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -1427,7 +1427,7 @@ def sparse_arithmetic_method(self, other):
                     # TODO: look into _wrap_result
                     if len(self) != len(other):
                         raise AssertionError(
-                            (f"length mismatch: {len(self)} vs. {len(other)}")
+                            f"length mismatch: {len(self)} vs. {len(other)}"
                         )
                     if not isinstance(other, SparseArray):
                         dtype = getattr(other, "dtype", None)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 968fb180abcd0..b860c83f89cbc 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -62,8 +62,7 @@ def flatten(l):
     """
     for el in l:
         if iterable_not_string(el):
-            for s in flatten(el):
-                yield s
+            yield from flatten(el)
         else:
             yield el
 
@@ -434,10 +433,8 @@ def random_state(state=None):
         return np.random
     else:
         raise ValueError(
-            (
-                "random_state must be an integer, array-like, a BitGenerator, "
-                "a numpy RandomState, or None"
-            )
+            "random_state must be an integer, array-like, a BitGenerator, "
+            "a numpy RandomState, or None"
         )
 
 
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index 09fc53716dda9..8c56f02c8d3cc 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -153,7 +153,7 @@ def _preparse(
     the ``tokenize`` module and ``tokval`` is a string.
     """
     assert callable(f), "f must be callable"
-    return tokenize.untokenize((f(x) for x in tokenize_string(source)))
+    return tokenize.untokenize(f(x) for x in tokenize_string(source))
 
 
 def _is_type(t):
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 8dd7c1a22d0ae..d876c655421ef 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -554,7 +554,7 @@ def __init__(
                 else:
                     w = _validate_where(w)
                     where[idx] = w
-            _where = " & ".join((f"({w})" for w in com.flatten(where)))
+            _where = " & ".join(f"({w})" for w in com.flatten(where))
         else:
             _where = where
 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index cc18b8681200f..0b9021b094cd7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1681,10 +1681,7 @@ def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
 
             label_axis_name = "column" if axis == 0 else "index"
             raise ValueError(
-                (
-                    f"The {label_axis_name} label '{key}' "
-                    f"is not unique.{multi_message}"
-                )
+                f"The {label_axis_name} label '{key}' is not unique.{multi_message}"
             )
 
         return values
@@ -1725,10 +1722,8 @@ def _drop_labels_or_levels(self, keys, axis: int = 0):
 
         if invalid_keys:
             raise ValueError(
-                (
-                    "The following keys are not valid labels or "
-                    f"levels for axis {axis}: {invalid_keys}"
-                )
+                "The following keys are not valid labels or "
+                f"levels for axis {axis}: {invalid_keys}"
             )
 
         # Compute levels and labels to drop
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 9cfd13f95ca0e..2387427d15670 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -93,15 +93,8 @@ def _gotitem(self, key, ndim, subset=None):
 )
 
 series_apply_allowlist = (
-    (
-        common_apply_allowlist
-        | {
-            "nlargest",
-            "nsmallest",
-            "is_monotonic_increasing",
-            "is_monotonic_decreasing",
-        }
-    )
+    common_apply_allowlist
+    | {"nlargest", "nsmallest", "is_monotonic_increasing", "is_monotonic_decreasing"}
 ) | frozenset(["dtype", "unique"])
 
 dataframe_apply_allowlist = common_apply_allowlist | frozenset(["dtypes", "corrwith"])
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index a931221ef3ce1..bbccd22f2ae85 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1212,7 +1212,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
                     return result
 
             else:
-                all_indexed_same = all_indexes_same((x.index for x in values))
+                all_indexed_same = all_indexes_same(x.index for x in values)
 
                 # GH3596
                 # provide a reduction (Frame -> Series) if groups are
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 222ae589ea7fc..525caab7564a3 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -504,7 +504,7 @@ def _maybe_check_unique(self):
         if not self.is_unique:
             msg = """Index has duplicates."""
             duplicates = self._format_duplicate_message()
-            msg += "\n{}".format(duplicates)
+            msg += f"\n{duplicates}"
 
             raise DuplicateLabelError(msg)
 
@@ -4315,10 +4315,8 @@ def identical(self, other) -> bool:
         return (
             self.equals(other)
             and all(
-                (
-                    getattr(self, c, None) == getattr(other, c, None)
-                    for c in self._comparables
-                )
+                getattr(self, c, None) == getattr(other, c, None)
+                for c in self._comparables
             )
             and type(self) == type(other)
         )
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 1ab40a76b30ff..5aa72bb838756 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -398,7 +398,7 @@ def _partial_date_slice(
 
         if len(self) and (
             (use_lhs and t1 < self[0] and t2 < self[0])
-            or ((use_rhs and t1 > self[-1] and t2 > self[-1]))
+            or (use_rhs and t1 > self[-1] and t2 > self[-1])
         ):
             # we are out of range
             raise KeyError
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index d95355589fd0c..5a6518995c554 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1837,7 +1837,7 @@ def _get_single_indexer(join_key, index, sort: bool = False):
 def _left_join_on_index(left_ax: Index, right_ax: Index, join_keys, sort: bool = False):
     if len(join_keys) > 1:
         if not (
-            (isinstance(right_ax, MultiIndex) and len(join_keys) == right_ax.nlevels)
+            isinstance(right_ax, MultiIndex) and len(join_keys) == right_ax.nlevels
        ):
            raise AssertionError(
                "If more than one join key is given then "
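
The patch applies two recurring cleanups: replacing an explicit "for ...: yield ..." loop with "yield from", and dropping the redundant parentheses around a generator expression that is already a call's sole argument. The snippet below is a minimal standalone sketch of both patterns; it is illustrative only, not part of the patch, and the names (flatten_lists, parts) are made up for the example.

# Illustrative only -- not pandas code.

def flatten_lists(items):
    """Recursively flatten nested lists."""
    for el in items:
        if isinstance(el, list):
            # "yield from" delegates to the sub-generator, replacing the
            # explicit "for s in flatten_lists(el): yield s" loop.
            yield from flatten_lists(el)
        else:
            yield el


# A generator expression passed as a call's sole argument needs no extra
# parentheses: join(f(x) for x in xs) is the same as join((f(x) for x in xs)).
parts = ["a", ["b", ["c"]], "d"]
print(" & ".join(f"({p})" for p in flatten_lists(parts)))  # (a) & (b) & (c) & (d)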