
CLN: change str.format() to f-string #30135


Merged · 2 commits · Dec 8, 2019
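The conversion applied throughout the diff below is mechanical: every str.format() call, whether it uses auto-numbered, positional, or keyword placeholders, is replaced by an f-string that inlines the same expression, so the rendered messages are unchanged. A minimal sketch of the common variants, using hypothetical values rather than the pandas objects touched in this PR:

    dtype = "int64"
    name = "sum"
    typ, cls = "str", "TimedeltaArray"

    # auto-numbered placeholder -> inline the expression
    assert "'{}' dtype instead.".format(dtype) == f"'{dtype}' dtype instead."

    # keyword placeholder -> drop the keyword and inline the value
    assert (
        "Cannot perform reduction '{name}' with string dtype".format(name=name)
        == f"Cannot perform reduction '{name}' with string dtype"
    )

    # multi-argument .format() call -> a single f-string
    assert (
        "cannot add the type {typ} to a {cls}".format(typ=typ, cls=cls)
        == f"cannot add the type {typ} to a {cls}"
    )

Where a message is too long for one source line, the new code splits it across adjacent f-string literals, which Python concatenates at compile time, so the message text itself does not change.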
12 changes: 5 additions & 7 deletions pandas/core/arrays/string_.py
@@ -171,7 +171,7 @@ def _validate(self):
if self._ndarray.dtype != "object":
raise ValueError(
"StringArray requires a sequence of strings. Got "
"'{}' dtype instead.".format(self._ndarray.dtype)
f"'{self._ndarray.dtype}' dtype instead."
)

@classmethod
@@ -222,7 +222,7 @@ def __setitem__(self, key, value):
value = StringDtype.na_value
elif not isinstance(value, str):
raise ValueError(
"Cannot set non-string value '{}' into a StringArray.".format(value)
f"Cannot set non-string value '{value}' into a StringArray."
)
else:
if not is_array_like(value):
@@ -245,7 +245,7 @@ def astype(self, dtype, copy=True):
return super().astype(dtype, copy)

def _reduce(self, name, skipna=True, **kwargs):
raise TypeError("Cannot perform reduction '{}' with string dtype".format(name))
raise TypeError(f"Cannot perform reduction '{name}' with string dtype")

def value_counts(self, dropna=False):
from pandas import value_counts
@@ -269,9 +269,7 @@ def method(self, other):
if len(other) != len(self):
# prevent improper broadcasting when other is 2D
raise ValueError(
"Lengths of operands do not match: {} != {}".format(
len(self), len(other)
)
f"Lengths of operands do not match: {len(self)} != {len(other)}"
)

other = np.asarray(other)
@@ -287,7 +285,7 @@ def method(self, other):
dtype = "object" if mask.any() else "bool"
return np.asarray(result, dtype=dtype)

return compat.set_function_name(method, "__{}__".format(op.__name__), cls)
return compat.set_function_name(method, f"__{op.__name__}__", cls)

@classmethod
def _add_arithmetic_ops(cls):
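An aside on the last string_.py hunk above: the f-string f"__{op.__name__}__" builds the dunder name that compat.set_function_name attaches to the generated comparison method, exactly as "__{}__".format(op.__name__) did. A quick, hypothetical check:

    import operator
    # operator.eq.__name__ is "eq", so both spellings yield "__eq__"
    assert f"__{operator.eq.__name__}__" == "__{}__".format(operator.eq.__name__) == "__eq__"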
55 changes: 15 additions & 40 deletions pandas/core/arrays/timedeltas.py
@@ -70,15 +70,15 @@ def f(self):
return result

f.__name__ = name
f.__doc__ = "\n{}\n".format(docstring)
f.__doc__ = f"\n{docstring}\n"
return property(f)


def _td_array_cmp(cls, op):
"""
Wrap comparison operations to convert timedelta-like to timedelta64
"""
opname = "__{name}__".format(name=op.__name__)
opname = f"__{op.__name__}__"
nat_result = opname == "__ne__"

@unpack_zerodim_and_defer(opname)
@@ -215,10 +215,10 @@ def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False):

if not isinstance(values, np.ndarray):
msg = (
"Unexpected type '{}'. 'values' must be a TimedeltaArray "
"ndarray, or Series or Index containing one of those."
f"Unexpected type '{type(values).__name__}'. 'values' must be a"
" TimedeltaArray ndarray, or Series or Index containing one of those."
)
raise ValueError(msg.format(type(values).__name__))
raise ValueError(msg)
if values.ndim != 1:
raise ValueError("Only 1-dimensional input arrays are supported.")

@@ -351,10 +351,7 @@ def _validate_fill_value(self, fill_value):
elif isinstance(fill_value, (timedelta, np.timedelta64, Tick)):
fill_value = Timedelta(fill_value).value
else:
raise ValueError(
"'fill_value' should be a Timedelta. "
"Got '{got}'.".format(got=fill_value)
)
raise ValueError(f"'fill_value' should be a Timedelta. Got '{fill_value}'.")
return fill_value

def astype(self, dtype, copy=True):
@@ -461,9 +458,7 @@ def _format_native_types(self, na_rep="NaT", date_format=None):
def _add_offset(self, other):
assert not isinstance(other, Tick)
raise TypeError(
"cannot add the type {typ} to a {cls}".format(
typ=type(other).__name__, cls=type(self).__name__
)
f"cannot add the type {type(other).__name__} to a {type(self).__name__}"
)

def _add_delta(self, delta):
@@ -523,9 +518,7 @@ def _addsub_offset_array(self, other, op):
return super()._addsub_offset_array(other, op)
except AttributeError:
raise TypeError(
"Cannot add/subtract non-tick DateOffset to {cls}".format(
cls=type(self).__name__
)
f"Cannot add/subtract non-tick DateOffset to {type(self).__name__}"
)

def __mul__(self, other):
@@ -634,9 +627,7 @@ def __rtruediv__(self, other):

elif lib.is_scalar(other):
raise TypeError(
"Cannot divide {typ} by {cls}".format(
typ=type(other).__name__, cls=type(self).__name__
)
f"Cannot divide {type(other).__name__} by {type(self).__name__}"
)

if not hasattr(other, "dtype"):
@@ -659,9 +650,7 @@ def __rtruediv__(self, other):

else:
raise TypeError(
"Cannot divide {dtype} data by {cls}".format(
dtype=other.dtype, cls=type(self).__name__
)
f"Cannot divide {other.dtype} data by {type(self).__name__}"
)

def __floordiv__(self, other):
@@ -724,11 +713,7 @@ def __floordiv__(self, other):

else:
dtype = getattr(other, "dtype", type(other).__name__)
raise TypeError(
"Cannot divide {typ} by {cls}".format(
typ=dtype, cls=type(self).__name__
)
)
raise TypeError(f"Cannot divide {dtype} by {type(self).__name__}")

def __rfloordiv__(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):
@@ -749,9 +734,7 @@ def __rfloordiv__(self, other):
return result

raise TypeError(
"Cannot divide {typ} by {cls}".format(
typ=type(other).__name__, cls=type(self).__name__
)
f"Cannot divide {type(other).__name__} by {type(self).__name__}"
)

if not hasattr(other, "dtype"):
@@ -779,11 +762,7 @@ def __rfloordiv__(self, other):

else:
dtype = getattr(other, "dtype", type(other).__name__)
raise TypeError(
"Cannot divide {typ} by {cls}".format(
typ=dtype, cls=type(self).__name__
)
)
raise TypeError(f"Cannot divide {dtype} by {type(self).__name__}")

def __mod__(self, other):
# Note: This is a naive implementation, can likely be optimized
@@ -1056,11 +1035,7 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"):

else:
# This includes datetime64-dtype, see GH#23539, GH#29794
raise TypeError(
"dtype {dtype} cannot be converted to timedelta64[ns]".format(
dtype=data.dtype
)
)
raise TypeError(f"dtype {data.dtype} cannot be converted to timedelta64[ns]")

data = np.array(data, copy=copy)
if data.ndim != 1:
@@ -1096,7 +1071,7 @@ def ints_to_td64ns(data, unit="ns"):
copy_made = True

if unit != "ns":
dtype_str = "timedelta64[{unit}]".format(unit=unit)
dtype_str = f"timedelta64[{unit}]"
data = data.view(dtype_str)

# TODO: watch out for overflows when converting from lower-resolution
2 changes: 1 addition & 1 deletion pandas/core/common.py
@@ -388,7 +388,7 @@ def standardize_mapping(into):
return partial(collections.defaultdict, into.default_factory)
into = type(into)
if not issubclass(into, abc.Mapping):
raise TypeError("unsupported type: {into}".format(into=into))
raise TypeError(f"unsupported type: {into}")
elif into == collections.defaultdict:
raise TypeError("to_dict() only accepts initialized defaultdicts")
return into
3 changes: 1 addition & 2 deletions pandas/core/computation/engines.py
@@ -31,8 +31,7 @@ def _check_ne_builtin_clash(expr):
if overlap:
s = ", ".join(repr(x) for x in overlap)
raise NumExprClobberingError(
'Variables in expression "{expr}" '
"overlap with builtins: ({s})".format(expr=expr, s=s)
f'Variables in expression "{expr}" overlap with builtins: ({s})'
)


31 changes: 13 additions & 18 deletions pandas/core/computation/expr.py
@@ -282,10 +282,9 @@ def _filter_nodes(superclass, all_nodes=_all_nodes):
# and we don't want `stmt` and friends in their so get only the class whose
# names are capitalized
_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes
_msg = "cannot both support and not support {intersection}".format(
intersection=_unsupported_nodes & _base_supported_nodes
)
assert not _unsupported_nodes & _base_supported_nodes, _msg
intersection = _unsupported_nodes & _base_supported_nodes
_msg = f"cannot both support and not support {intersection}"
assert not intersection, _msg


def _node_not_implemented(node_name, cls):
@@ -312,7 +311,7 @@ def disallowed(cls):
cls.unsupported_nodes = ()
for node in nodes:
new_method = _node_not_implemented(node, cls)
name = "visit_{node}".format(node=node)
name = f"visit_{node}"
cls.unsupported_nodes += (name,)
setattr(cls, name, new_method)
return cls
@@ -349,13 +348,13 @@ def add_ops(op_classes):

def f(cls):
for op_attr_name, op_class in op_classes.items():
ops = getattr(cls, "{name}_ops".format(name=op_attr_name))
ops_map = getattr(cls, "{name}_op_nodes_map".format(name=op_attr_name))
ops = getattr(cls, f"{op_attr_name}_ops")
ops_map = getattr(cls, f"{op_attr_name}_op_nodes_map")
for op in ops:
op_node = ops_map[op]
if op_node is not None:
made_op = _op_maker(op_class, op)
setattr(cls, "visit_{node}".format(node=op_node), made_op)
setattr(cls, f"visit_{op_node}", made_op)
return cls

return f
@@ -529,8 +528,8 @@ def _maybe_evaluate_binop(

if res.has_invalid_return_type:
raise TypeError(
"unsupported operand type(s) for {op}:"
" '{lhs}' and '{rhs}'".format(op=res.op, lhs=lhs.type, rhs=rhs.type)
f"unsupported operand type(s) for {res.op}:"
f" '{lhs.type}' and '{rhs.type}'"
)

if self.engine != "pytables":
@@ -677,7 +676,7 @@ def visit_Attribute(self, node, **kwargs):
if isinstance(value, ast.Name) and value.id == attr:
return resolved

raise ValueError("Invalid Attribute context {name}".format(name=ctx.__name__))
raise ValueError(f"Invalid Attribute context {ctx.__name__}")

def visit_Call(self, node, side=None, **kwargs):

@@ -697,7 +696,7 @@ def visit_Call(self, node, side=None, **kwargs):
raise

if res is None:
raise ValueError("Invalid function call {func}".format(func=node.func.id))
raise ValueError(f"Invalid function call {node.func.id}")
if hasattr(res, "value"):
res = res.value

@@ -707,8 +706,7 @@ def visit_Call(self, node, side=None, **kwargs):

if node.keywords:
raise TypeError(
'Function "{name}" does not support keyword '
"arguments".format(name=res.name)
f'Function "{res.name}" does not support keyword arguments'
)

return res(*new_args, **kwargs)
@@ -719,10 +717,7 @@ def visit_Call(self, node, side=None, **kwargs):

for key in node.keywords:
if not isinstance(key, ast.keyword):
raise ValueError(
"keyword error in function call "
"'{func}'".format(func=node.func.id)
)
raise ValueError(f"keyword error in function call '{node.func.id}'")

if key.arg:
kwargs[key.arg] = self.visit(key.value).value
2 changes: 1 addition & 1 deletion pandas/core/computation/expressions.py
@@ -109,7 +109,7 @@ def _evaluate_numexpr(op, op_str, a, b):
b_value = getattr(b, "values", b)

result = ne.evaluate(
"a_value {op} b_value".format(op=op_str),
f"a_value {op_str} b_value",
local_dict={"a_value": a_value, "b_value": b_value},
casting="safe",
)
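For context on the expressions.py change just above: the string handed to numexpr is only the expression text, with operands resolved through local_dict, so building it with an f-string instead of .format() feeds numexpr exactly the same "a_value {op} b_value" expression. A rough standalone illustration, assuming numexpr is installed; the array values here are made up for illustration:

    import numexpr as ne
    import numpy as np

    a_value = np.arange(5.0)
    b_value = np.ones(5)
    op_str = "+"
    # same expression text either way; only how the string is built differs
    result = ne.evaluate(
        f"a_value {op_str} b_value",
        local_dict={"a_value": a_value, "b_value": b_value},
        casting="safe",
    )
    assert (result == a_value + b_value).all()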
14 changes: 7 additions & 7 deletions pandas/core/computation/ops.py
@@ -212,8 +212,8 @@ def __repr__(self) -> str:
Print a generic n-ary operator and its operands using infix notation.
"""
# recurse over the operands
parened = ("({0})".format(pprint_thing(opr)) for opr in self.operands)
return pprint_thing(" {0} ".format(self.op).join(parened))
parened = (f"({pprint_thing(opr)})" for opr in self.operands)
return pprint_thing(f" {self.op} ".join(parened))

@property
def return_type(self):
Expand Down Expand Up @@ -506,8 +506,8 @@ def __init__(self, lhs, rhs, **kwargs):

if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
raise TypeError(
"unsupported operand type(s) for {0}:"
" '{1}' and '{2}'".format(self.op, lhs.return_type, rhs.return_type)
f"unsupported operand type(s) for {self.op}:"
f" '{lhs.return_type}' and '{rhs.return_type}'"
)

# do not upcast float32s to float64 un-necessarily
@@ -554,7 +554,7 @@ def __call__(self, env):
return self.func(operand)

def __repr__(self) -> str:
return pprint_thing("{0}({1})".format(self.op, self.operand))
return pprint_thing(f"{self.op}({self.operand})")

@property
def return_type(self) -> np.dtype:
@@ -580,7 +580,7 @@ def __call__(self, env):

def __repr__(self) -> str:
operands = map(str, self.operands)
return pprint_thing("{0}({1})".format(self.op, ",".join(operands)))
return pprint_thing(f"{self.op}({','.join(operands)})")


class FuncNode:
@@ -592,7 +592,7 @@ def __init__(self, name: str):
and _NUMEXPR_VERSION < LooseVersion("2.6.9")
and name in ("floor", "ceil")
):
raise ValueError('"{0}" is not a supported function'.format(name))
raise ValueError(f'"{name}" is not a supported function')

self.name = name
self.func = getattr(np, name)