
STY: change .format() to fstring #30116


Merged — 1 commit merged on Dec 6, 2019
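The diff below applies one mechanical pattern throughout: error and repr messages built with str.format() are rewritten as f-strings that interpolate the same expressions inline. A minimal sketch of that pattern, using hypothetical names and values that are not taken from the PR, might look like this:

    # Hypothetical illustration of the .format() -> f-string rewrite applied in this PR.
    name, dtype = "sum", "period[D]"

    # Before: named fields filled in by str.format()
    old_msg = "cannot perform {name} with type {dtype}".format(name=name, dtype=dtype)

    # After: an f-string evaluates the expressions directly inside the literal
    new_msg = f"cannot perform {name} with type {dtype}"

    assert old_msg == new_msg  # the rendered text is identical

The rendered messages are unchanged; only how each string is constructed differs.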
17 changes: 5 additions & 12 deletions pandas/core/arrays/base.py
@@ -520,8 +520,8 @@ def fillna(self, value=None, method=None, limit=None):
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
"Length of 'value' does not match. Got ({}) "
" expected {}".format(len(value), len(self))
f"Length of 'value' does not match. Got ({len(value)}) "
f"expected {len(self)}"
)
value = value[mask]

@@ -918,17 +918,14 @@ def view(self, dtype=None) -> Union[ABCExtensionArray, np.ndarray]:
def __repr__(self) -> str:
from pandas.io.formats.printing import format_object_summary

template = "{class_name}{data}\nLength: {length}, dtype: {dtype}"
# the short repr has no trailing newline, while the truncated
# repr does. So we include a newline in our template, and strip
# any trailing newlines from format_object_summary
data = format_object_summary(
self, self._formatter(), indent_for_name=False
).rstrip(", \n")
class_name = "<{}>\n".format(type(self).__name__)
return template.format(
class_name=class_name, data=data, length=len(self), dtype=self.dtype
)
class_name = f"<{type(self).__name__}>\n"
return f"{class_name}{data}\nLength: {len(self)}, dtype: {self.dtype}"

def _formatter(self, boxed: bool = False) -> Callable[[Any], Optional[str]]:
"""Formatting function for scalar values.
@@ -1044,11 +1041,7 @@ def _reduce(self, name, skipna=True, **kwargs):
------
TypeError : subclass does not define reductions
"""
raise TypeError(
"cannot perform {name} with type {dtype}".format(
name=name, dtype=self.dtype
)
)
raise TypeError(f"cannot perform {name} with type {self.dtype}")


class ExtensionOpsMixin:
79 changes: 28 additions & 51 deletions pandas/core/arrays/datetimelike.py
@@ -470,10 +470,11 @@ def __setitem__(
key = cast(Sequence, key)
if len(key) != len(value) and not com.is_bool_indexer(key):
msg = (
"shape mismatch: value array of length '{}' does "
"not match indexing result of length '{}'."
f"shape mismatch: value array of length '{len(key)}' "
"does not match indexing result of length "
f"'{len(value)}'."
)
raise ValueError(msg.format(len(key), len(value)))
raise ValueError(msg)
elif not len(key):
return

@@ -487,12 +488,10 @@ def __setitem__(
value = iNaT
else:
msg = (
"'value' should be a '{scalar}', 'NaT', or array of those. "
"Got '{typ}' instead."
)
raise TypeError(
msg.format(scalar=self._scalar_type.__name__, typ=type(value).__name__)
f"'value' should be a '{self._scalar_type.__name__}', 'NaT', "
f"or array of those. Got '{type(value).__name__}' instead."
)
raise TypeError(msg)
self._data[key] = value
self._maybe_clear_freq()

@@ -532,8 +531,8 @@ def astype(self, dtype, copy=True):
) or is_float_dtype(dtype):
# disallow conversion between datetime/timedelta,
# and conversions for any datetimelike to float
msg = "Cannot cast {name} to dtype {dtype}"
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
elif is_categorical_dtype(dtype):
return Categorical(self, dtype=dtype)
else:
@@ -637,9 +636,7 @@ def searchsorted(self, value, side="left", sorter=None):
value = self._scalar_from_string(value)

if not (isinstance(value, (self._scalar_type, type(self))) or isna(value)):
raise ValueError(
"Unexpected type for 'value': {valtype}".format(valtype=type(value))
)
raise ValueError(f"Unexpected type for 'value': {type(value)}")

self._check_compatible_with(value)
if isinstance(value, type(self)):
@@ -759,8 +756,8 @@ def fillna(self, value=None, method=None, limit=None):
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
"Length of 'value' does not match. Got ({}) "
" expected {}".format(len(value), len(self))
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {len(self)}"
)
value = value[mask]

@@ -880,10 +877,8 @@ def _validate_frequency(cls, index, freq, **kwargs):
# raise a ValueError, which we re-raise with a more targeted
# message.
raise ValueError(
"Inferred frequency {infer} from passed values "
"does not conform to passed frequency {passed}".format(
infer=inferred, passed=freq.freqstr
)
f"Inferred frequency {inferred} from passed values "
f"does not conform to passed frequency {freq.freqstr}"
)

# monotonicity/uniqueness properties are called via frequencies.infer_freq,
@@ -922,27 +917,21 @@ def _is_unique(self):
def _add_datetimelike_scalar(self, other):
# Overriden by TimedeltaArray
raise TypeError(
"cannot add {cls} and {typ}".format(
cls=type(self).__name__, typ=type(other).__name__
)
f"cannot add {type(self).__name__} and " f"{type(other).__name__}"
)

_add_datetime_arraylike = _add_datetimelike_scalar

def _sub_datetimelike_scalar(self, other):
# Overridden by DatetimeArray
assert other is not NaT
raise TypeError(
"cannot subtract a datelike from a {cls}".format(cls=type(self).__name__)
)
raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")

_sub_datetime_arraylike = _sub_datetimelike_scalar

def _sub_period(self, other):
# Overriden by PeriodArray
raise TypeError(
"cannot subtract Period from a {cls}".format(cls=type(self).__name__)
)
raise TypeError(f"cannot subtract Period from a {type(self).__name__}")

def _add_offset(self, offset):
raise AbstractMethodError(self)
@@ -1022,9 +1011,7 @@ def _add_nat(self):
"""
if is_period_dtype(self):
raise TypeError(
"Cannot add {cls} and {typ}".format(
cls=type(self).__name__, typ=type(NaT).__name__
)
f"Cannot add {type(self).__name__} and {type(NaT).__name__}"
)

# GH#19124 pd.NaT is treated like a timedelta for both timedelta
@@ -1064,9 +1051,7 @@ def _sub_period_array(self, other):
"""
if not is_period_dtype(self):
raise TypeError(
"cannot subtract {dtype}-dtype from {cls}".format(
dtype=other.dtype, cls=type(self).__name__
)
f"cannot subtract {other.dtype}-dtype from {type(self).__name__}"
)

if len(self) != len(other):
@@ -1140,7 +1125,7 @@ def _addsub_offset_array(self, other, op):

warnings.warn(
"Adding/subtracting array of DateOffsets to "
"{cls} not vectorized".format(cls=type(self).__name__),
f"{type(self).__name__} not vectorized",
PerformanceWarning,
)

@@ -1313,17 +1298,11 @@ def __rsub__(self, other):
# GH#19959 datetime - datetime is well-defined as timedelta,
# but any other type - datetime is not well-defined.
raise TypeError(
"cannot subtract {cls} from {typ}".format(
cls=type(self).__name__, typ=type(other).__name__
)
f"cannot subtract {type(self).__name__} from {type(other).__name__}"
)
elif is_period_dtype(self.dtype) and is_timedelta64_dtype(other):
# TODO: Can we simplify/generalize these cases at all?
raise TypeError(
"cannot subtract {cls} from {dtype}".format(
cls=type(self).__name__, dtype=other.dtype
)
)
raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}")
elif is_timedelta64_dtype(self.dtype):
if lib.is_integer(other) or is_integer_dtype(other):
# need to subtract before negating, since that flips freq
@@ -1472,9 +1451,9 @@ def mean(self, skipna=True):
if is_period_dtype(self):
# See discussion in GH#24757
raise TypeError(
"mean is not implemented for {cls} since the meaning is "
"ambiguous. An alternative is "
"obj.to_timestamp(how='start').mean()".format(cls=type(self).__name__)
f"mean is not implemented for {type(self).__name__} since the "
"meaning is ambiguous. An alternative is "
"obj.to_timestamp(how='start').mean()"
)

mask = self.isna()
@@ -1520,9 +1499,7 @@ def validate_periods(periods):
if lib.is_float(periods):
periods = int(periods)
elif not lib.is_integer(periods):
raise TypeError(
"periods must be a number, got {periods}".format(periods=periods)
)
raise TypeError(f"periods must be a number, got {periods}")
return periods


@@ -1583,9 +1560,9 @@ def validate_inferred_freq(freq, inferred_freq, freq_infer):
if inferred_freq is not None:
if freq is not None and freq != inferred_freq:
raise ValueError(
"Inferred frequency {inferred} from passed "
f"Inferred frequency {inferred_freq} from passed "
"values does not conform to passed frequency "
"{passed}".format(inferred=inferred_freq, passed=freq.freqstr)
f"{freq.freqstr}"
)
elif freq is None:
freq = inferred_freq
40 changes: 17 additions & 23 deletions pandas/core/arrays/datetimes.py
@@ -150,7 +150,7 @@ def _dt_array_cmp(cls, op):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
opname = "__{name}__".format(name=op.__name__)
opname = f"__{op.__name__}__"
nat_result = opname == "__ne__"

@unpack_zerodim_and_defer(opname)
@@ -338,9 +338,9 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):
if not timezones.tz_compare(dtz, values.tz):
msg = (
"Timezone of the array and 'dtype' do not match. "
"'{}' != '{}'"
f"'{dtz}' != '{values.tz}'"
)
raise TypeError(msg.format(dtz, values.tz))
raise TypeError(msg)
elif values.tz:
dtype = values.dtype
# freq = validate_values_freq(values, freq)
@@ -350,10 +350,11 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):

if not isinstance(values, np.ndarray):
msg = (
"Unexpected type '{}'. 'values' must be a DatetimeArray "
"ndarray, or Series or Index containing one of those."
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray ndarray, or Series or Index containing one of"
" those."
)
raise ValueError(msg.format(type(values).__name__))
raise ValueError(msg)
if values.ndim != 1:
raise ValueError("Only 1-dimensional input arrays are supported.")

@@ -366,9 +367,9 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):
if values.dtype != _NS_DTYPE:
msg = (
"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'."
" Got {} instead."
f" Got {values.dtype} instead."
)
raise ValueError(msg.format(values.dtype))
raise ValueError(msg)

dtype = _validate_dt64_dtype(dtype)

@@ -577,11 +578,7 @@ def _check_compatible_with(self, other):
if other is NaT:
return
if not timezones.tz_compare(self.tz, other.tz):
raise ValueError(
"Timezones don't match. '{own} != {other}'".format(
own=self.tz, other=other.tz
)
)
raise ValueError(f"Timezones don't match. '{self.tz} != {other.tz}'")

def _maybe_clear_freq(self):
self._freq = None
@@ -732,10 +729,7 @@ def _validate_fill_value(self, fill_value):
self._assert_tzawareness_compat(fill_value)
fill_value = Timestamp(fill_value).value
else:
raise ValueError(
"'fill_value' should be a Timestamp. "
"Got '{got}'.".format(got=fill_value)
)
raise ValueError(f"'fill_value' should be a Timestamp. Got '{fill_value}'.")
return fill_value

# -----------------------------------------------------------------
@@ -799,8 +793,8 @@ def _sub_datetime_arraylike(self, other):
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
"{cls} subtraction must have the same "
"timezones or no timezones".format(cls=type(self).__name__)
f"{type(self).__name__} subtraction must have the same "
"timezones or no timezones"
)

self_i8 = self.asi8
@@ -2119,8 +2113,8 @@ def maybe_infer_tz(tz, inferred_tz):
pass
elif not timezones.tz_compare(tz, inferred_tz):
raise TypeError(
"data is already tz-aware {inferred_tz}, unable to "
"set specified tz: {tz}".format(inferred_tz=inferred_tz, tz=tz)
f"data is already tz-aware {inferred_tz}, unable to "
f"set specified tz: {tz}"
)
return tz

@@ -2164,8 +2158,8 @@ def _validate_dt64_dtype(dtype):
dtype, (np.dtype, DatetimeTZDtype)
):
raise ValueError(
"Unexpected value for 'dtype': '{dtype}'. "
"Must be 'datetime64[ns]' or DatetimeTZDtype'.".format(dtype=dtype)
f"Unexpected value for 'dtype': '{dtype}'. "
"Must be 'datetime64[ns]' or DatetimeTZDtype'."
)
return dtype

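Several of the new messages span more than one source line. They rely on Python's implicit concatenation of adjacent string literals (each of which may independently carry the f prefix) so that long messages stay within line-length limits. A minimal sketch of that behaviour, with hypothetical values not drawn from the PR:

    # Hypothetical illustration: adjacent literals inside parentheses are joined
    # at compile time, so a long f-string message can be split across lines.
    value = [1, 2, 3]
    msg = (
        f"Length of 'value' does not match. Got ({len(value)}) "
        f"expected 3"
    )
    assert msg == "Length of 'value' does not match. Got (3) expected 3"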