CLN: f-string in pandas/core/arrays/sparse/* #30121

Merged: 2 commits, Dec 7, 2019
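
The cleanup applies the same mechanical pattern throughout: error and name strings built with str.format() are rewritten as f-strings that interpolate the expressions directly. A minimal sketch of the before/after shape, using the column-mismatch message from accessor.py with illustrative variable names (n_got and n_expected are placeholders, not names from the diff):

    n_got, n_expected = 5, 3

    # before: placeholders filled in by str.format()
    msg = "Column length mismatch: {columns} vs. {K}".format(columns=n_got, K=n_expected)

    # after: the same expressions interpolated in place by an f-string
    msg = f"Column length mismatch: {n_got} vs. {n_expected}"

Both forms produce the identical string; the f-string simply removes the indirection of naming each placeholder twice.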
10 changes: 2 additions & 8 deletions pandas/core/arrays/sparse/accessor.py
@@ -323,13 +323,7 @@ def _prep_index(data, index, columns):
columns = ibase.default_index(K)

if len(columns) != K:
raise ValueError(
"Column length mismatch: {columns} vs. {K}".format(
columns=len(columns), K=K
)
)
raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
if len(index) != N:
raise ValueError(
"Index length mismatch: {index} vs. {N}".format(index=len(index), N=N)
)
raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
return index, columns
55 changes: 21 additions & 34 deletions pandas/core/arrays/sparse/array.py
@@ -143,13 +143,13 @@ def _sparse_array_op(
name = name[1:]

if name in ("and", "or") and dtype == "bool":
opname = "sparse_{name}_uint8".format(name=name)
opname = f"sparse_{name}_uint8"
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
right_sp_values = right.sp_values.view(np.uint8)
result_dtype = np.bool
else:
opname = "sparse_{name}_{dtype}".format(name=name, dtype=dtype)
opname = f"sparse_{name}_{dtype}"
left_sp_values = left.sp_values
right_sp_values = right.sp_values

@@ -364,16 +364,16 @@ def __init__(
sparse_values = np.asarray(data, dtype=dtype)
if len(sparse_values) != sparse_index.npoints:
raise AssertionError(
"Non array-like type {type} must "
"have the same length as the index".format(type=type(sparse_values))
f"Non array-like type {type(sparse_values)} must "
"have the same length as the index"
)
self._sparse_index = sparse_index
self._sparse_values = sparse_values
self._dtype = SparseDtype(sparse_values.dtype, fill_value)

@classmethod
def _simple_new(
cls, sparse_array: np.ndarray, sparse_index: SparseIndex, dtype: SparseDtype,
cls, sparse_array: np.ndarray, sparse_index: SparseIndex, dtype: SparseDtype
) -> "SparseArray":
new = cls([])
new._sparse_index = sparse_index
@@ -412,7 +412,7 @@ def from_spmatrix(cls, data):
length, ncol = data.shape

if ncol != 1:
raise ValueError("'data' must have a single column, not '{}'".format(ncol))
raise ValueError(f"'data' must have a single column, not '{ncol}'")

# our sparse index classes require that the positions be strictly
# increasing. So we need to sort loc, and arr accordingly.
@@ -771,7 +771,7 @@ def __getitem__(self, key):
elif hasattr(key, "__len__"):
return self.take(key)
else:
raise ValueError("Cannot slice with '{}'".format(key))
raise ValueError(f"Cannot slice with '{key}'")

return type(self)(data_slice, kind=self.kind)

@@ -791,9 +791,7 @@ def _get_val_at(self, loc):

def take(self, indices, allow_fill=False, fill_value=None):
if is_scalar(indices):
raise ValueError(
"'indices' must be an array, not a scalar '{}'.".format(indices)
)
raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.")
indices = np.asarray(indices, dtype=np.int32)

if indices.size == 0:
@@ -932,8 +930,8 @@ def _concat_same_type(cls, to_concat):
if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
warnings.warn(
"Concatenating sparse arrays with multiple fill "
"values: '{}'. Picking the first and "
"converting the rest.".format(fill_values),
f"values: '{fill_values}'. Picking the first and "
"converting the rest.",
PerformanceWarning,
stacklevel=6,
)
@@ -1153,11 +1151,7 @@ def _reduce(self, name, skipna=True, **kwargs):
method = getattr(self, name, None)

if method is None:
raise TypeError(
"cannot perform {name} with type {dtype}".format(
name=name, dtype=self.dtype
)
)
raise TypeError(f"cannot perform {name} with type {self.dtype}")

if skipna:
arr = self
@@ -1253,7 +1247,7 @@ def cumsum(self, axis=0, *args, **kwargs):
nv.validate_cumsum(args, kwargs)

if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour.
raise ValueError("axis(={axis}) out of bounds".format(axis=axis))
raise ValueError(f"axis(={axis}) out of bounds")

if not self._null_fill_value:
return SparseArray(self.to_dense()).cumsum()
@@ -1367,7 +1361,7 @@ def sparse_unary_method(self) -> "SparseArray":
dtype = SparseDtype(values.dtype, fill_value)
return cls._simple_new(values, self.sp_index, dtype)

name = "__{name}__".format(name=op.__name__)
name = f"__{op.__name__}__"
return compat.set_function_name(sparse_unary_method, name, cls)

@classmethod
@@ -1401,11 +1395,7 @@ def sparse_arithmetic_method(self, other):
# TODO: look into _wrap_result
if len(self) != len(other):
raise AssertionError(
(
"length mismatch: {self} vs. {other}".format(
self=len(self), other=len(other)
)
)
(f"length mismatch: {len(self)} vs. {len(other)}")
)
if not isinstance(other, SparseArray):
dtype = getattr(other, "dtype", None)
@@ -1414,7 +1404,7 @@ def sparse_arithmetic_method(self, other):
)
return _sparse_array_op(self, other, op, op_name)

name = "__{name}__".format(name=op.__name__)
name = f"__{op.__name__}__"
return compat.set_function_name(sparse_arithmetic_method, name, cls)

@classmethod
@@ -1434,9 +1424,7 @@ def cmp_method(self, other):
# TODO: make this more flexible than just ndarray...
if len(self) != len(other):
raise AssertionError(
"length mismatch: {self} vs. {other}".format(
self=len(self), other=len(other)
)
f"length mismatch: {len(self)} vs. {len(other)}"
)
other = SparseArray(other, fill_value=self.fill_value)

@@ -1454,7 +1442,7 @@ def cmp_method(self, other):
dtype=np.bool_,
)

name = "__{name}__".format(name=op.__name__)
name = f"__{op.__name__}__"
return compat.set_function_name(cmp_method, name, cls)

@classmethod
@@ -1473,11 +1461,10 @@ def _add_comparison_ops(cls):
# Formatting
# -----------
def __repr__(self) -> str:
return "{self}\nFill: {fill}\n{index}".format(
self=printing.pprint_thing(self),
fill=printing.pprint_thing(self.fill_value),
index=printing.pprint_thing(self.sp_index),
)
pp_str = printing.pprint_thing(self)
pp_fill = printing.pprint_thing(self.fill_value)
pp_index = printing.pprint_thing(self.sp_index)
return f"{pp_str}\nFill: {pp_fill}\n{pp_index}"

def _formatter(self, boxed=False):
# Defer to the formatter from the GenericArrayFormatter calling us.
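Several of the hunks above build dunder method names with name = f"__{op.__name__}__" before handing the wrapper to compat.set_function_name. A quick standalone check of what that f-string evaluates to (operator comes from the standard library; the snippet is illustrative and not part of the PR):

    import operator

    op = operator.add
    name = f"__{op.__name__}__"   # operator.add.__name__ is "add"
    print(name)                   # prints: __add__

So the arithmetic, comparison, and unary wrappers end up registered under names like __add__ and __eq__, exactly as with the old .format() construction.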
14 changes: 6 additions & 8 deletions pandas/core/arrays/sparse/dtype.py
@@ -79,9 +79,7 @@ def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
fill_value = na_value_for_dtype(dtype)

if not is_scalar(fill_value):
raise ValueError(
"fill_value must be a scalar. Got {} instead".format(fill_value)
)
raise ValueError(f"fill_value must be a scalar. Got {fill_value} instead")
self._dtype = dtype
self._fill_value = fill_value

@@ -163,7 +161,7 @@ def subtype(self):

@property
def name(self):
return "Sparse[{}, {}]".format(self.subtype.name, self.fill_value)
return f"Sparse[{self.subtype.name}, {self.fill_value}]"

def __repr__(self) -> str:
return self.name
@@ -201,7 +199,7 @@ def construct_from_string(cls, string):
-------
SparseDtype
"""
msg = "Could not construct SparseDtype from '{}'".format(string)
msg = f"Could not construct SparseDtype from '{string}'"
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
@@ -210,14 +208,14 @@
else:
result = SparseDtype(sub_type)
msg = (
"Could not construct SparseDtype from '{}'.\n\nIt "
f"Could not construct SparseDtype from '{string}'.\n\nIt "
"looks like the fill_value in the string is not "
"the default for the dtype. Non-default fill_values "
"are not supported. Use the 'SparseDtype()' "
"constructor instead."
)
if has_fill_value and str(result) != string:
raise TypeError(msg.format(string))
raise TypeError(msg)
return result
else:
raise TypeError(msg)
@@ -253,7 +251,7 @@ def _parse_subtype(dtype: str) -> Tuple[str, bool]:
elif dtype == "Sparse":
subtype = "float64"
else:
raise ValueError("Cannot parse {}".format(dtype))
raise ValueError(f"Cannot parse {dtype}")
return subtype, has_fill_value

@classmethod
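In construct_from_string the replacement message mixes an f-string with plain string literals inside one set of parentheses; Python concatenates adjacent literals at compile time, so only the first piece needs the f prefix because only it contains an interpolated value. A shortened, self-contained illustration of that pattern (the message text is abridged from the diff):

    string = "Sparse[int64, 1]"
    msg = (
        f"Could not construct SparseDtype from '{string}'.\n\nIt "
        "looks like the fill_value in the string is not "
        "the default for the dtype."
    )
    print(msg)

Because the string is interpolated up front, the later raise TypeError(msg) no longer needs the msg.format(string) call the old code relied on.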
2 changes: 1 addition & 1 deletion pandas/core/arrays/sparse/scipy_sparse.py
@@ -137,7 +137,7 @@ def _coo_to_sparse_series(A, dense_index: bool = False):
try:
s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
except AttributeError:
raise TypeError("Expected coo_matrix. Got {} instead.".format(type(A).__name__))
raise TypeError(f"Expected coo_matrix. Got {type(A).__name__} instead.")
s = s.sort_index()
s = s.astype(SparseDtype(s.dtype))
if dense_index:
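The scipy_sparse.py change interpolates type(A).__name__ so the TypeError names the class it actually received. For example, passing a plain NumPy array instead of a coo_matrix would format as follows (a standalone illustration, not the pandas call itself):

    import numpy as np

    A = np.array([[1.0, 0.0]])   # not a scipy.sparse.coo_matrix
    print(f"Expected coo_matrix. Got {type(A).__name__} instead.")
    # Expected coo_matrix. Got ndarray instead.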