CLN: replace %s syntax with .format in missing.py, nanops.py, ops.py #17322

Merged
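
The pattern applied throughout the diff is a mechanical swap of %-interpolation for str.format with named fields. As a minimal illustration (not part of the PR itself), using the clean_fill_method message from missing.py:

    expecting = 'pad (ffill), backfill (bfill) or nearest'
    method = 'linear'

    # old style: positional %-interpolation
    old = 'Invalid fill method. Expecting %s. Got %s' % (expecting, method)

    # new style: named fields make the message template self-documenting
    new = ('Invalid fill method. Expecting {expecting}. Got {method}'
           .format(expecting=expecting, method=method))

    assert old == new
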
41 changes: 26 additions & 15 deletions pandas/core/missing.py
@@ -88,8 +88,8 @@ def clean_fill_method(method, allow_nearest=False):
valid_methods.append('nearest')
expecting = 'pad (ffill), backfill (bfill) or nearest'
if method not in valid_methods:
msg = ('Invalid fill method. Expecting %s. Got %s' %
(expecting, method))
msg = ('Invalid fill method. Expecting {expecting}. Got {method}'
.format(expecting=expecting, method=method))
raise ValueError(msg)
return method

@@ -104,8 +104,8 @@ def clean_interp_method(method, **kwargs):
raise ValueError("You must specify the order of the spline or "
"polynomial.")
if method not in valid:
raise ValueError("method must be one of {0}."
"Got '{1}' instead.".format(valid, method))
raise ValueError("method must be one of {valid}. Got '{method}' "
"instead.".format(valid=valid, method=method))

return method

@@ -146,8 +146,10 @@ def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
valid_limit_directions = ['forward', 'backward', 'both']
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
raise ValueError('Invalid limit_direction: expecting one of %r, got '
'%r.' % (valid_limit_directions, limit_direction))
msg = ('Invalid limit_direction: expecting one of {valid!r}, '
'got {invalid!r}.')
raise ValueError(msg.format(valid=valid_limit_directions,
invalid=limit_direction))

from pandas import Series
ys = Series(yvalues)
@@ -248,7 +250,8 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
# TODO: Why is DatetimeIndex being imported here?
from pandas import DatetimeIndex # noqa
except ImportError:
raise ImportError('{0} interpolation requires Scipy'.format(method))
raise ImportError('{method} interpolation requires SciPy'
.format(method=method))

new_x = np.asarray(new_x)

@@ -466,7 +469,8 @@ def pad_1d(values, limit=None, mask=None, dtype=None):
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'pad_inplace_%s' % dtype.name, None)
name = 'pad_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_1d_datetime
elif is_integer_dtype(values):
@@ -476,7 +480,8 @@ def pad_1d(values, limit=None, mask=None, dtype=None):
_method = algos.pad_inplace_object

if _method is None:
raise ValueError('Invalid dtype for pad_1d [%s]' % dtype.name)
raise ValueError('Invalid dtype for pad_1d [{name}]'
.format(name=dtype.name))

if mask is None:
mask = isna(values)
@@ -490,7 +495,8 @@ def backfill_1d(values, limit=None, mask=None, dtype=None):
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'backfill_inplace_%s' % dtype.name, None)
name = 'backfill_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_1d_datetime
elif is_integer_dtype(values):
@@ -500,7 +506,8 @@ def backfill_1d(values, limit=None, mask=None, dtype=None):
_method = algos.backfill_inplace_object

if _method is None:
raise ValueError('Invalid dtype for backfill_1d [%s]' % dtype.name)
raise ValueError('Invalid dtype for backfill_1d [{name}]'
.format(name=dtype.name))

if mask is None:
mask = isna(values)
@@ -515,7 +522,8 @@ def pad_2d(values, limit=None, mask=None, dtype=None):
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'pad_2d_inplace_%s' % dtype.name, None)
name = 'pad_2d_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_2d_datetime
elif is_integer_dtype(values):
@@ -525,7 +533,8 @@ def pad_2d(values, limit=None, mask=None, dtype=None):
_method = algos.pad_2d_inplace_object

if _method is None:
raise ValueError('Invalid dtype for pad_2d [%s]' % dtype.name)
raise ValueError('Invalid dtype for pad_2d [{name}]'
.format(name=dtype.name))

if mask is None:
mask = isna(values)
@@ -544,7 +553,8 @@ def backfill_2d(values, limit=None, mask=None, dtype=None):
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'backfill_2d_inplace_%s' % dtype.name, None)
name = 'backfill_2d_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_2d_datetime
elif is_integer_dtype(values):
@@ -554,7 +564,8 @@ def backfill_2d(values, limit=None, mask=None, dtype=None):
_method = algos.backfill_2d_inplace_object

if _method is None:
raise ValueError('Invalid dtype for backfill_2d [%s]' % dtype.name)
raise ValueError('Invalid dtype for backfill_2d [{name}]'
.format(name=dtype.name))

if mask is None:
mask = isna(values)
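
The pad_1d/backfill_1d/pad_2d/backfill_2d hunks above all build the algos routine name with a named format field before looking it up with getattr. A minimal sketch of that dispatch pattern, using a stand-in namespace rather than the real pandas algos module:

    import numpy as np

    class _fake_algos(object):
        # stand-in for the real algos module; only a float64 filler "exists"
        @staticmethod
        def pad_inplace_float64(values, mask, limit=None):
            pass

    def _lookup_pad_method(dtype):
        # same pattern as the new pad_1d: build the name, then look it up
        name = 'pad_inplace_{name}'.format(name=dtype.name)
        return getattr(_fake_algos, name, None)

    print(_lookup_pad_method(np.dtype('float64')))  # the stand-in function
    print(_lookup_pad_method(np.dtype('float16')))  # None: no matching routine
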
8 changes: 4 additions & 4 deletions pandas/core/nanops.py
@@ -70,9 +70,8 @@ def __call__(self, f):
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, compat.itervalues(kwargs))
if any(self.check(obj) for obj in obj_iter):
raise TypeError('reduction operation {0!r} not allowed for '
'this dtype'.format(
f.__name__.replace('nan', '')))
msg = 'reduction operation {name!r} not allowed for this dtype'
raise TypeError(msg.format(name=f.__name__.replace('nan', '')))
try:
with np.errstate(invalid='ignore'):
return f(*args, **kwargs)
@@ -786,7 +785,8 @@ def _ensure_numeric(x):
try:
x = complex(x)
except Exception:
raise TypeError('Could not convert %s to numeric' % str(x))
raise TypeError('Could not convert {value!s} to numeric'
.format(value=x))
return x

# NA-friendly array comparisons
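
The nanops.py messages above use the .format conversion flags !r and !s, which correspond to the old %r and %s specifiers. A small illustrative snippet (not taken from the diff):

    # !r applies repr() to the value, matching %r; !s applies str(), matching %s
    msg = 'reduction operation {name!r} not allowed for this dtype'
    print(msg.format(name='argmax'))
    # -> reduction operation 'argmax' not allowed for this dtype

    print('Could not convert {value!s} to numeric'.format(value='foo'))
    # -> Could not convert foo to numeric
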
84 changes: 46 additions & 38 deletions pandas/core/ops.py
@@ -63,9 +63,9 @@ def _create_methods(arith_method, comp_method, bool_method,

def names(x):
if x[-1] == "_":
return "__%s_" % x
return "__{name}_".format(name=x)
else:
return "__%s__" % x
return "__{name}__".format(name=x)
else:
names = lambda x: x

@@ -388,8 +388,8 @@ def _validate(self, lvalues, rvalues, name):
if name not in ('__div__', '__truediv__', '__mul__', '__rmul__'):
raise TypeError("can only operate on a timedelta and an "
"integer or a float for division and "
"multiplication, but the operator [%s] was"
"passed" % name)
"multiplication, but the operator [{name}] "
"was passed".format(name=name))

# 2 timedeltas
elif ((self.is_timedelta_lhs and
@@ -400,9 +400,9 @@ def _validate(self, lvalues, rvalues, name):
if name not in ('__div__', '__rdiv__', '__truediv__',
'__rtruediv__', '__add__', '__radd__', '__sub__',
'__rsub__'):
raise TypeError("can only operate on a timedeltas for "
"addition, subtraction, and division, but the"
" operator [%s] was passed" % name)
raise TypeError("can only operate on a timedeltas for addition"
", subtraction, and division, but the operator"
" [{name}] was passed".format(name=name))

# datetime and timedelta/DateOffset
elif (self.is_datetime_lhs and
@@ -411,23 +411,24 @@ def _validate(self, lvalues, rvalues, name):
if name not in ('__add__', '__radd__', '__sub__'):
raise TypeError("can only operate on a datetime with a rhs of "
"a timedelta/DateOffset for addition and "
"subtraction, but the operator [%s] was "
"passed" % name)
"subtraction, but the operator [{name}] was "
"passed".format(name=name))

elif (self.is_datetime_rhs and
(self.is_timedelta_lhs or self.is_offset_lhs)):
if name not in ('__add__', '__radd__', '__rsub__'):
raise TypeError("can only operate on a timedelta/DateOffset "
"with a rhs of a datetime for addition, "
"but the operator [%s] was passed" % name)
"but the operator [{name}] was passed"
.format(name=name))

# 2 datetimes
elif self.is_datetime_lhs and self.is_datetime_rhs:

if name not in ('__sub__', '__rsub__'):
raise TypeError("can only operate on a datetimes for"
" subtraction, but the operator [%s] was"
" passed" % name)
" subtraction, but the operator [{name}] was"
" passed".format(name=name))

# if tz's must be equal (same or None)
if getattr(lvalues, 'tz', None) != getattr(rvalues, 'tz', None):
@@ -439,8 +440,8 @@ def _validate(self, lvalues, rvalues, name):

if name not in ('__add__', '__radd__'):
raise TypeError("can only operate on a timedelta/DateOffset "
"and a datetime for addition, but the "
"operator [%s] was passed" % name)
"and a datetime for addition, but the operator"
" [{name}] was passed".format(name=name))
else:
raise TypeError('cannot operate on a series without a rhs '
'of a series/ndarray of type datetime64[ns] '
@@ -498,7 +499,7 @@ def _convert_to_array(self, values, name=None, other=None):
values = values.to_timestamp().to_series()
elif name not in ('__truediv__', '__div__', '__mul__', '__rmul__'):
raise TypeError("incompatible type for a datetime/timedelta "
"operation [{0}]".format(name))
"operation [{name}]".format(name=name))
elif inferred_type == 'floating':
if (isna(values).all() and
name in ('__add__', '__radd__', '__sub__', '__rsub__')):
@@ -508,8 +509,9 @@ def _convert_to_array(self, values, name=None, other=None):
elif self._is_offset(values):
return values
else:
raise TypeError("incompatible type [{0}] for a datetime/timedelta"
" operation".format(np.array(values).dtype))
raise TypeError("incompatible type [{dtype}] for a "
"datetime/timedelta operation"
.format(dtype=np.array(values).dtype))

return values

@@ -866,8 +868,8 @@ def wrapper(self, other, axis=None):
with np.errstate(all='ignore'):
res = na_op(values, other)
if is_scalar(res):
raise TypeError('Could not compare %s type with Series' %
type(other))
raise TypeError('Could not compare {typ} type with Series'
.format(typ=type(other)))

# always return a full value series here
res = _values_from_object(res)
@@ -906,9 +908,10 @@ def na_op(x, y):
y = bool(y)
result = lib.scalar_binop(x, y, op)
except:
raise TypeError("cannot compare a dtyped [{0}] array with "
"a scalar of type [{1}]".format(
x.dtype, type(y).__name__))
msg = ("cannot compare a dtyped [{dtype}] array "
"with a scalar of type [{type}]"
).format(dtype=x.dtype, type=type(y).__name__)
raise TypeError(msg)

return result

@@ -1140,14 +1143,17 @@ def _align_method_FRAME(left, right, axis):
""" convert rhs to meet lhs dims if input is list, tuple or np.ndarray """

def to_series(right):
msg = 'Unable to coerce to Series, length must be {0}: given {1}'
msg = ('Unable to coerce to Series, length must be {req_len}: '
'given {given_len}')
if axis is not None and left._get_axis_name(axis) == 'index':
if len(left.index) != len(right):
raise ValueError(msg.format(len(left.index), len(right)))
raise ValueError(msg.format(req_len=len(left.index),
given_len=len(right)))
right = left._constructor_sliced(right, index=left.index)
else:
if len(left.columns) != len(right):
raise ValueError(msg.format(len(left.columns), len(right)))
raise ValueError(msg.format(req_len=len(left.columns),
given_len=len(right)))
right = left._constructor_sliced(right, index=left.columns)
return right

@@ -1161,15 +1167,16 @@ def to_series(right):

elif right.ndim == 2:
if left.shape != right.shape:
msg = ("Unable to coerce to DataFrame, "
"shape must be {0}: given {1}")
raise ValueError(msg.format(left.shape, right.shape))
msg = ("Unable to coerce to DataFrame, shape "
"must be {req_shape}: given {given_shape}"
).format(req_shape=left.shape, given_shape=right.shape)
raise ValueError(msg)

right = left._constructor(right, index=left.index,
columns=left.columns)
else:
msg = 'Unable to coerce to Series/DataFrame, dim must be <= 2: {0}'
raise ValueError(msg.format(right.shape, ))
raise ValueError('Unable to coerce to Series/DataFrame, dim '
'must be <= 2: {dim}'.format(dim=right.shape))

return right

@@ -1278,7 +1285,8 @@ def na_op(x, y):

return result

@Appender('Wrapper for flexible comparison methods %s' % name)
@Appender('Wrapper for flexible comparison methods {name}'
.format(name=name))
def f(self, other, axis=default_axis, level=None):

other = _align_method_FRAME(self, other, axis)
@@ -1299,7 +1307,7 @@ def f(self, other, axis=default_axis, level=None):


def _comp_method_FRAME(func, name, str_rep, masker=False):
@Appender('Wrapper for comparison method %s' % name)
@Appender('Wrapper for comparison method {name}'.format(name=name))
def f(self, other):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._compare_frame(other, func, str_rep)
@@ -1349,9 +1357,9 @@ def na_op(x, y):
# work only for scalars
def f(self, other):
if not is_scalar(other):
raise ValueError('Simple arithmetic with %s can only be '
'done with scalar values' %
self._constructor.__name__)
raise ValueError('Simple arithmetic with {name} can only be '
'done with scalar values'
.format(name=self._constructor.__name__))

return self._combine(other, op)

@@ -1384,7 +1392,7 @@ def na_op(x, y):

return result

@Appender('Wrapper for comparison method %s' % name)
@Appender('Wrapper for comparison method {name}'.format(name=name))
def f(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
@@ -1394,8 +1402,8 @@ def f(self, other, axis=None):
return self._compare_constructor(other, na_op, try_cast=False)
elif isinstance(other, (self._constructor_sliced, pd.DataFrame,
ABCSeries)):
raise Exception("input needs alignment for this object [%s]" %
self._constructor)
raise Exception("input needs alignment for this object [{object}]"
.format(object=self._constructor))
else:
return self._combine_const(other, na_op, try_cast=False)

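
Several of the ops.py messages are now defined once as a template and filled in with keyword arguments (for example in _align_method_FRAME). A brief sketch of why named fields help when a template is reused:

    # one template, reused by both the index and the columns length checks
    msg = ('Unable to coerce to Series, length must be {req_len}: '
           'given {given_len}')

    print(msg.format(req_len=3, given_len=5))
    # keyword arguments can be supplied in any order without changing the text
    print(msg.format(given_len=2, req_len=4))
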