diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py
index 595af6dc08733..c207b96a8d308 100644
--- a/pandas/core/arrays/sparse/accessor.py
+++ b/pandas/core/arrays/sparse/accessor.py
@@ -323,13 +323,7 @@ def _prep_index(data, index, columns):
             columns = ibase.default_index(K)
 
         if len(columns) != K:
-            raise ValueError(
-                "Column length mismatch: {columns} vs. {K}".format(
-                    columns=len(columns), K=K
-                )
-            )
+            raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
         if len(index) != N:
-            raise ValueError(
-                "Index length mismatch: {index} vs. {N}".format(index=len(index), N=N)
-            )
+            raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
         return index, columns
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index e909e92139c80..548f2bf702e60 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -143,13 +143,13 @@ def _sparse_array_op(
             name = name[1:]
 
         if name in ("and", "or") and dtype == "bool":
-            opname = "sparse_{name}_uint8".format(name=name)
+            opname = f"sparse_{name}_uint8"
             # to make template simple, cast here
             left_sp_values = left.sp_values.view(np.uint8)
             right_sp_values = right.sp_values.view(np.uint8)
             result_dtype = np.bool
         else:
-            opname = "sparse_{name}_{dtype}".format(name=name, dtype=dtype)
+            opname = f"sparse_{name}_{dtype}"
             left_sp_values = left.sp_values
             right_sp_values = right.sp_values
 
@@ -364,8 +364,8 @@ def __init__(
             sparse_values = np.asarray(data, dtype=dtype)
             if len(sparse_values) != sparse_index.npoints:
                 raise AssertionError(
-                    "Non array-like type {type} must "
-                    "have the same length as the index".format(type=type(sparse_values))
+                    f"Non array-like type {type(sparse_values)} must "
+                    "have the same length as the index"
                 )
         self._sparse_index = sparse_index
         self._sparse_values = sparse_values
@@ -373,7 +373,7 @@
 
     @classmethod
     def _simple_new(
-        cls, sparse_array: np.ndarray, sparse_index: SparseIndex, dtype: SparseDtype,
+        cls, sparse_array: np.ndarray, sparse_index: SparseIndex, dtype: SparseDtype
     ) -> "SparseArray":
         new = cls([])
         new._sparse_index = sparse_index
@@ -412,7 +412,7 @@ def from_spmatrix(cls, data):
         length, ncol = data.shape
 
         if ncol != 1:
-            raise ValueError("'data' must have a single column, not '{}'".format(ncol))
+            raise ValueError(f"'data' must have a single column, not '{ncol}'")
 
         # our sparse index classes require that the positions be strictly
         # increasing. So we need to sort loc, and arr accordingly.
@@ -771,7 +771,7 @@ def __getitem__(self, key):
             elif hasattr(key, "__len__"):
                 return self.take(key)
             else:
-                raise ValueError("Cannot slice with '{}'".format(key))
+                raise ValueError(f"Cannot slice with '{key}'")
 
         return type(self)(data_slice, kind=self.kind)
 
@@ -791,9 +791,7 @@ def _get_val_at(self, loc):
 
     def take(self, indices, allow_fill=False, fill_value=None):
         if is_scalar(indices):
-            raise ValueError(
-                "'indices' must be an array, not a scalar '{}'.".format(indices)
-            )
+            raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.")
         indices = np.asarray(indices, dtype=np.int32)
 
         if indices.size == 0:
@@ -932,8 +930,8 @@ def _concat_same_type(cls, to_concat):
         if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
             warnings.warn(
                 "Concatenating sparse arrays with multiple fill "
-                "values: '{}'. Picking the first and "
-                "converting the rest.".format(fill_values),
+                f"values: '{fill_values}'. Picking the first and "
+                "converting the rest.",
                 PerformanceWarning,
                 stacklevel=6,
             )
@@ -1153,11 +1151,7 @@ def _reduce(self, name, skipna=True, **kwargs):
         method = getattr(self, name, None)
 
         if method is None:
-            raise TypeError(
-                "cannot perform {name} with type {dtype}".format(
-                    name=name, dtype=self.dtype
-                )
-            )
+            raise TypeError(f"cannot perform {name} with type {self.dtype}")
 
         if skipna:
             arr = self
@@ -1253,7 +1247,7 @@ def cumsum(self, axis=0, *args, **kwargs):
         nv.validate_cumsum(args, kwargs)
 
         if axis is not None and axis >= self.ndim:  # Mimic ndarray behaviour.
-            raise ValueError("axis(={axis}) out of bounds".format(axis=axis))
+            raise ValueError(f"axis(={axis}) out of bounds")
 
         if not self._null_fill_value:
             return SparseArray(self.to_dense()).cumsum()
@@ -1367,7 +1361,7 @@ def sparse_unary_method(self) -> "SparseArray":
             dtype = SparseDtype(values.dtype, fill_value)
             return cls._simple_new(values, self.sp_index, dtype)
 
-        name = "__{name}__".format(name=op.__name__)
+        name = f"__{op.__name__}__"
         return compat.set_function_name(sparse_unary_method, name, cls)
 
     @classmethod
@@ -1401,11 +1395,7 @@ def sparse_arithmetic_method(self, other):
                     # TODO: look into _wrap_result
                     if len(self) != len(other):
                         raise AssertionError(
-                            (
-                                "length mismatch: {self} vs. {other}".format(
-                                    self=len(self), other=len(other)
-                                )
-                            )
+                            (f"length mismatch: {len(self)} vs. {len(other)}")
                         )
                     if not isinstance(other, SparseArray):
                         dtype = getattr(other, "dtype", None)
@@ -1414,7 +1404,7 @@ def sparse_arithmetic_method(self, other):
                         )
                     return _sparse_array_op(self, other, op, op_name)
 
-        name = "__{name}__".format(name=op.__name__)
+        name = f"__{op.__name__}__"
         return compat.set_function_name(sparse_arithmetic_method, name, cls)
 
     @classmethod
@@ -1434,9 +1424,7 @@ def cmp_method(self, other):
                 # TODO: make this more flexible than just ndarray...
                 if len(self) != len(other):
                     raise AssertionError(
-                        "length mismatch: {self} vs. {other}".format(
-                            self=len(self), other=len(other)
-                        )
+                        f"length mismatch: {len(self)} vs. {len(other)}"
                     )
                 other = SparseArray(other, fill_value=self.fill_value)
 
@@ -1454,7 +1442,7 @@ def cmp_method(self, other):
                     dtype=np.bool_,
                 )
 
-        name = "__{name}__".format(name=op.__name__)
+        name = f"__{op.__name__}__"
         return compat.set_function_name(cmp_method, name, cls)
 
     @classmethod
@@ -1473,11 +1461,10 @@ def _add_comparison_ops(cls):
     # Formatting
     # -----------
     def __repr__(self) -> str:
-        return "{self}\nFill: {fill}\n{index}".format(
-            self=printing.pprint_thing(self),
-            fill=printing.pprint_thing(self.fill_value),
-            index=printing.pprint_thing(self.sp_index),
-        )
+        pp_str = printing.pprint_thing(self)
+        pp_fill = printing.pprint_thing(self.fill_value)
+        pp_index = printing.pprint_thing(self.sp_index)
+        return f"{pp_str}\nFill: {pp_fill}\n{pp_index}"
 
     def _formatter(self, boxed=False):
         # Defer to the formatter from the GenericArrayFormatter calling us.
diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
index 4fb64ec9255e1..941db116589e8 100644
--- a/pandas/core/arrays/sparse/dtype.py
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -79,9 +79,7 @@ def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
             fill_value = na_value_for_dtype(dtype)
 
         if not is_scalar(fill_value):
-            raise ValueError(
-                "fill_value must be a scalar. Got {} instead".format(fill_value)
-            )
+            raise ValueError(f"fill_value must be a scalar. Got {fill_value} instead")
         self._dtype = dtype
         self._fill_value = fill_value
 
@@ -163,7 +161,7 @@ def subtype(self):
 
     @property
     def name(self):
-        return "Sparse[{}, {}]".format(self.subtype.name, self.fill_value)
+        return f"Sparse[{self.subtype.name}, {self.fill_value}]"
 
     def __repr__(self) -> str:
         return self.name
@@ -201,7 +199,7 @@ def construct_from_string(cls, string):
         -------
         SparseDtype
         """
-        msg = "Could not construct SparseDtype from '{}'".format(string)
+        msg = f"Could not construct SparseDtype from '{string}'"
         if string.startswith("Sparse"):
             try:
                 sub_type, has_fill_value = cls._parse_subtype(string)
@@ -210,14 +208,14 @@ def construct_from_string(cls, string):
             else:
                 result = SparseDtype(sub_type)
                 msg = (
-                    "Could not construct SparseDtype from '{}'.\n\nIt "
+                    f"Could not construct SparseDtype from '{string}'.\n\nIt "
                     "looks like the fill_value in the string is not "
                     "the default for the dtype. Non-default fill_values "
                     "are not supported. Use the 'SparseDtype()' "
                     "constructor instead."
                 )
                 if has_fill_value and str(result) != string:
-                    raise TypeError(msg.format(string))
+                    raise TypeError(msg)
                 return result
         else:
             raise TypeError(msg)
@@ -253,7 +251,7 @@ def _parse_subtype(dtype: str) -> Tuple[str, bool]:
         elif dtype == "Sparse":
             subtype = "float64"
         else:
-            raise ValueError("Cannot parse {}".format(dtype))
+            raise ValueError(f"Cannot parse {dtype}")
         return subtype, has_fill_value
 
     @classmethod
diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py
index 6ae2903d9826c..f244055b05cd4 100644
--- a/pandas/core/arrays/sparse/scipy_sparse.py
+++ b/pandas/core/arrays/sparse/scipy_sparse.py
@@ -137,7 +137,7 @@ def _coo_to_sparse_series(A, dense_index: bool = False):
     try:
         s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
     except AttributeError:
-        raise TypeError("Expected coo_matrix. Got {} instead.".format(type(A).__name__))
+        raise TypeError(f"Expected coo_matrix. Got {type(A).__name__} instead.")
     s = s.sort_index()
     s = s.astype(SparseDtype(s.dtype))
     if dense_index:
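
Note (not part of the diff): a minimal standalone check that the f-string replacements above render the same text as the old str.format() calls. The sample values are arbitrary stand-ins for illustration, not pandas internals.

    # Compare two of the converted messages against their old str.format() forms.
    columns, K = [0, 1, 2], 2
    old = "Column length mismatch: {columns} vs. {K}".format(columns=len(columns), K=K)
    new = f"Column length mismatch: {len(columns)} vs. {K}"
    assert old == new == "Column length mismatch: 3 vs. 2"

    fill_value = [1, 2]  # a non-scalar, as rejected in SparseDtype.__init__
    old = "fill_value must be a scalar. Got {} instead".format(fill_value)
    new = f"fill_value must be a scalar. Got {fill_value} instead"
    assert old == new == "fill_value must be a scalar. Got [1, 2] instead"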