Skip to content

Commit 388c322

Browse files
committed
CLN: replace %s syntax with .format in pandas.core.reshape
Replaced %s syntax with .format in pandas.core.reshape. Additionally, made some of the existing positional .format code more explicit.
1 parent 0f25426 commit 388c322

File tree

5 files changed

+64
-57
lines changed

5 files changed

+64
-57
lines changed

pandas/core/reshape/concat.py

+15-14
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
220220
if isinstance(objs, (NDFrame, compat.string_types)):
221221
raise TypeError('first argument must be an iterable of pandas '
222222
'objects, you passed an object of type '
223-
'"{0}"'.format(type(objs).__name__))
223+
'"{name}"'.format(name=type(objs).__name__))
224224

225225
if join == 'outer':
226226
self.intersect = False
@@ -309,8 +309,8 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
309309

310310
self._is_series = isinstance(sample, Series)
311311
if not 0 <= axis <= sample.ndim:
312-
raise AssertionError("axis must be between 0 and {0}, "
313-
"input was {1}".format(sample.ndim, axis))
312+
raise AssertionError("axis must be between 0 and {ndim}, input was"
313+
" {axis}".format(ndim=sample.ndim, axis=axis))
314314

315315
# if we have mixed ndims, then convert to highest ndim
316316
# creating column numbers as needed
@@ -431,8 +431,8 @@ def _get_new_axes(self):
431431
new_axes[i] = self._get_comb_axis(i)
432432
else:
433433
if len(self.join_axes) != ndim - 1:
434-
raise AssertionError("length of join_axes must not be "
435-
"equal to {0}".format(ndim - 1))
434+
raise AssertionError("length of join_axes must not be equal "
435+
"to {length}".format(length=ndim - 1))
436436

437437
# ufff...
438438
indices = compat.lrange(ndim)
@@ -451,7 +451,8 @@ def _get_comb_axis(self, i):
451451
intersect=self.intersect)
452452
except IndexError:
453453
types = [type(x).__name__ for x in self.objs]
454-
raise TypeError("Cannot concatenate list of %s" % types)
454+
raise TypeError("Cannot concatenate list of {types}"
455+
.format(types=types))
455456

456457
def _get_concat_axis(self):
457458
"""
@@ -470,8 +471,8 @@ def _get_concat_axis(self):
470471
for i, x in enumerate(self.objs):
471472
if not isinstance(x, Series):
472473
raise TypeError("Cannot concatenate type 'Series' "
473-
"with object of type "
474-
"%r" % type(x).__name__)
474+
"with object of type {type!r}"
475+
.format(type=type(x).__name__))
475476
if x.name is not None:
476477
names[i] = x.name
477478
has_names = True
@@ -505,8 +506,8 @@ def _maybe_check_integrity(self, concat_index):
505506
if self.verify_integrity:
506507
if not concat_index.is_unique:
507508
overlap = concat_index.get_duplicates()
508-
raise ValueError('Indexes have overlapping values: %s'
509-
% str(overlap))
509+
raise ValueError('Indexes have overlapping values: '
510+
'{overlap!s}'.format(overlap=overlap))
510511

511512

512513
def _concat_indexes(indexes):
@@ -547,8 +548,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
547548
try:
548549
i = level.get_loc(key)
549550
except KeyError:
550-
raise ValueError('Key %s not in level %s'
551-
% (str(key), str(level)))
551+
raise ValueError('Key {key!s} not in level {level!s}'
552+
.format(key=key, level=level))
552553

553554
to_concat.append(np.repeat(i, len(index)))
554555
label_list.append(np.concatenate(to_concat))
@@ -597,8 +598,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
597598

598599
mask = mapped == -1
599600
if mask.any():
600-
raise ValueError('Values not found in passed level: %s'
601-
% str(hlevel[mask]))
601+
raise ValueError('Values not found in passed level: {hlevel!s}'
602+
.format(hlevel=hlevel[mask]))
602603

603604
new_labels.append(np.repeat(mapped, n))
604605

pandas/core/reshape/merge.py

+28-25
Original file line numberDiff line numberDiff line change
@@ -534,28 +534,27 @@ def __init__(self, left, right, how='inner', on=None,
534534
'indicator option can only accept boolean or string arguments')
535535

536536
if not isinstance(left, DataFrame):
537-
raise ValueError(
538-
'can not merge DataFrame with instance of '
539-
'type {0}'.format(type(left)))
537+
raise ValueError('can not merge DataFrame with instance of '
538+
'type {left}'.format(left=type(left)))
540539
if not isinstance(right, DataFrame):
541-
raise ValueError(
542-
'can not merge DataFrame with instance of '
543-
'type {0}'.format(type(right)))
540+
raise ValueError('can not merge DataFrame with instance of '
541+
'type {right}'.format(right=type(right)))
544542

545543
if not is_bool(left_index):
546544
raise ValueError(
547545
'left_index parameter must be of type bool, not '
548-
'{0}'.format(type(left_index)))
546+
'{left_index}'.format(left_index=type(left_index)))
549547
if not is_bool(right_index):
550548
raise ValueError(
551549
'right_index parameter must be of type bool, not '
552-
'{0}'.format(type(right_index)))
550+
'{right_index}'.format(right_index=type(right_index)))
553551

554552
# warn user when merging between different levels
555553
if left.columns.nlevels != right.columns.nlevels:
556554
msg = ('merging between different levels can give an unintended '
557-
'result ({0} levels on the left, {1} on the right)')
558-
msg = msg.format(left.columns.nlevels, right.columns.nlevels)
555+
'result ({left} levels on the left, {right} on the right)'
556+
).format(left=left.columns.nlevels,
557+
right=right.columns.nlevels)
559558
warnings.warn(msg, UserWarning)
560559

561560
self._validate_specification()
@@ -613,7 +612,8 @@ def _indicator_pre_merge(self, left, right):
613612
for i in ['_left_indicator', '_right_indicator']:
614613
if i in columns:
615614
raise ValueError("Cannot use `indicator=True` option when "
616-
"data contains a column named {}".format(i))
615+
"data contains a column named {name}"
616+
.format(name=i))
617617
if self.indicator_name in columns:
618618
raise ValueError(
619619
"Cannot use name of an existing column for indicator column")
@@ -717,7 +717,7 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
717717
if name in result:
718718
result[name] = key_col
719719
else:
720-
result.insert(i, name or 'key_%d' % i, key_col)
720+
result.insert(i, name or 'key_{i}'.format(i=i), key_col)
721721

722722
def _get_join_indexers(self):
723723
""" return the join indexers """
@@ -952,8 +952,8 @@ def _validate_specification(self):
952952
if len(common_cols) == 0:
953953
raise MergeError('No common columns to perform merge on')
954954
if not common_cols.is_unique:
955-
raise MergeError("Data columns not unique: %s"
956-
% repr(common_cols))
955+
raise MergeError("Data columns not unique: {common!r}"
956+
.format(common=common_cols))
957957
self.left_on = self.right_on = common_cols
958958
elif self.on is not None:
959959
if self.left_on is not None or self.right_on is not None:
@@ -1119,12 +1119,14 @@ def get_result(self):
11191119

11201120

11211121
def _asof_function(direction, on_type):
1122-
return getattr(libjoin, 'asof_join_%s_%s' % (direction, on_type), None)
1122+
name = 'asof_join_{dir}_{on}'.format(dir=direction, on=on_type)
1123+
return getattr(libjoin, name, None)
11231124

11241125

11251126
def _asof_by_function(direction, on_type, by_type):
1126-
return getattr(libjoin, 'asof_join_%s_%s_by_%s' %
1127-
(direction, on_type, by_type), None)
1127+
name = 'asof_join_{dir}_{on}_by_{by}'.format(
1128+
dir=direction, on=on_type, by=by_type)
1129+
return getattr(libjoin, name, None)
11281130

11291131

11301132
_type_casters = {
@@ -1153,7 +1155,7 @@ def _get_cython_type(dtype):
11531155
type_name = _get_dtype(dtype).name
11541156
ctype = _cython_types.get(type_name, 'object')
11551157
if ctype == 'error':
1156-
raise MergeError('unsupported type: ' + type_name)
1158+
raise MergeError('unsupported type: {type}'.format(type=type_name))
11571159
return ctype
11581160

11591161

@@ -1235,7 +1237,8 @@ def _validate_specification(self):
12351237

12361238
# check 'direction' is valid
12371239
if self.direction not in ['backward', 'forward', 'nearest']:
1238-
raise MergeError('direction invalid: ' + self.direction)
1240+
raise MergeError('direction invalid: {direction}'
1241+
.format(direction=self.direction))
12391242

12401243
@property
12411244
def _asof_key(self):
@@ -1264,7 +1267,7 @@ def _get_merge_keys(self):
12641267
lt = left_join_keys[-1]
12651268

12661269
msg = "incompatible tolerance, must be compat " \
1267-
"with type {0}".format(type(lt))
1270+
"with type {lt}".format(lt=type(lt))
12681271

12691272
if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):
12701273
if not isinstance(self.tolerance, Timedelta):
@@ -1283,8 +1286,8 @@ def _get_merge_keys(self):
12831286

12841287
# validate allow_exact_matches
12851288
if not is_bool(self.allow_exact_matches):
1286-
raise MergeError("allow_exact_matches must be boolean, "
1287-
"passed {0}".format(self.allow_exact_matches))
1289+
msg = "allow_exact_matches must be boolean, passed {passed}"
1290+
raise MergeError(msg.format(passed=self.allow_exact_matches))
12881291

12891292
return left_join_keys, right_join_keys, join_names
12901293

@@ -1306,11 +1309,11 @@ def flip(xs):
13061309
tolerance = self.tolerance
13071310

13081311
# we required sortedness in the join keys
1309-
msg = " keys must be sorted"
1312+
msg = "{side} keys must be sorted"
13101313
if not Index(left_values).is_monotonic:
1311-
raise ValueError('left' + msg)
1314+
raise ValueError(msg.format(side='left'))
13121315
if not Index(right_values).is_monotonic:
1313-
raise ValueError('right' + msg)
1316+
raise ValueError(msg.format(side='right'))
13141317

13151318
# initial type conversion as needed
13161319
if needs_i8_conversion(left_values):

pandas/core/reshape/pivot.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -145,18 +145,18 @@ def _add_margins(table, data, values, rows, cols, aggfunc,
145145
if not isinstance(margins_name, compat.string_types):
146146
raise ValueError('margins_name argument must be a string')
147147

148-
exception_msg = 'Conflicting name "{0}" in margins'.format(margins_name)
148+
msg = 'Conflicting name "{name}" in margins'.format(name=margins_name)
149149
for level in table.index.names:
150150
if margins_name in table.index.get_level_values(level):
151-
raise ValueError(exception_msg)
151+
raise ValueError(msg)
152152

153153
grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name)
154154

155155
# could be passed a Series object with no 'columns'
156156
if hasattr(table, 'columns'):
157157
for level in table.columns.names[1:]:
158158
if margins_name in table.columns.get_level_values(level):
159-
raise ValueError(exception_msg)
159+
raise ValueError(msg)
160160

161161
if len(rows) > 1:
162162
key = (margins_name,) + ('',) * (len(rows) - 1)
@@ -553,7 +553,7 @@ def _get_names(arrs, names, prefix='row'):
553553
if isinstance(arr, ABCSeries) and arr.name is not None:
554554
names.append(arr.name)
555555
else:
556-
names.append('%s_%d' % (prefix, i))
556+
names.append('{prefix}_{i}'.format(prefix=prefix, i=i))
557557
else:
558558
if len(names) != len(arrs):
559559
raise AssertionError('arrays and names must have the same length')

pandas/core/reshape/reshape.py

+15-12
Original file line numberDiff line numberDiff line change
@@ -91,8 +91,8 @@ def __init__(self, values, index, level=-1, value_columns=None,
9191

9292
if isinstance(self.index, MultiIndex):
9393
if index._reference_duplicate_name(level):
94-
msg = ("Ambiguous reference to {0}. The index "
95-
"names are not unique.".format(level))
94+
msg = ("Ambiguous reference to {level}. The index "
95+
"names are not unique.".format(level=level))
9696
raise ValueError(msg)
9797

9898
self.level = self.index._get_level_number(level)
@@ -229,7 +229,7 @@ def get_new_values(self):
229229
sorted_values = sorted_values.astype(name, copy=False)
230230

231231
# fill in our values & mask
232-
f = getattr(_reshape, "unstack_{}".format(name))
232+
f = getattr(_reshape, "unstack_{name}".format(name=name))
233233
f(sorted_values,
234234
mask.view('u1'),
235235
stride,
@@ -516,8 +516,8 @@ def factorize(index):
516516
N, K = frame.shape
517517
if isinstance(frame.columns, MultiIndex):
518518
if frame.columns._reference_duplicate_name(level):
519-
msg = ("Ambiguous reference to {0}. The column "
520-
"names are not unique.".format(level))
519+
msg = ("Ambiguous reference to {level}. The column "
520+
"names are not unique.".format(level=level))
521521
raise ValueError(msg)
522522

523523
# Will also convert negative level numbers and check if out of bounds.
@@ -747,7 +747,7 @@ def melt(frame, id_vars=None, value_vars=None, var_name=None,
747747
if len(frame.columns.names) == len(set(frame.columns.names)):
748748
var_name = frame.columns.names
749749
else:
750-
var_name = ['variable_%s' % i
750+
var_name = ['variable_{i}'.format(i=i)
751751
for i in range(len(frame.columns.names))]
752752
else:
753753
var_name = [frame.columns.name if frame.columns.name is not None
@@ -1027,7 +1027,8 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix='\d+'):
10271027
in a typical case.
10281028
"""
10291029
def get_var_names(df, stub, sep, suffix):
1030-
regex = "^{0}{1}{2}".format(re.escape(stub), re.escape(sep), suffix)
1030+
regex = "^{stub}{sep}{suffix}".format(
1031+
stub=re.escape(stub), sep=re.escape(sep), suffix=suffix)
10311032
return df.filter(regex=regex).columns.tolist()
10321033

10331034
def melt_stub(df, stub, i, j, value_vars, sep):
@@ -1180,13 +1181,14 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
11801181

11811182
# validate prefixes and separator to avoid silently dropping cols
11821183
def check_len(item, name):
1183-
length_msg = ("Length of '{0}' ({1}) did not match the length of "
1184-
"the columns being encoded ({2}).")
1184+
len_msg = ("Length of '{name}' ({len_item}) did not match the "
1185+
"length of the columns being encoded ({len_enc}).")
11851186

11861187
if is_list_like(item):
11871188
if not len(item) == len(columns_to_encode):
1188-
raise ValueError(length_msg.format(name, len(item),
1189-
len(columns_to_encode)))
1189+
len_msg = len_msg.format(name=name, len_item=len(item),
1190+
len_enc=len(columns_to_encode))
1191+
raise ValueError(len_msg)
11901192

11911193
check_len(prefix, 'prefix')
11921194
check_len(prefix_sep, 'prefix_sep')
@@ -1253,7 +1255,8 @@ def get_empty_Frame(data, sparse):
12531255
number_of_cols = len(levels)
12541256

12551257
if prefix is not None:
1256-
dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v) for v in levels]
1258+
dummy_cols = [u'{pfx}{sep}{v}'.format(pfx=prefix, sep=prefix_sep, v=v)
1259+
for v in levels]
12571260
else:
12581261
dummy_cols = levels
12591262

pandas/core/reshape/tile.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -229,9 +229,9 @@ def _bins_to_cuts(x, bins, right=True, labels=None,
229229
unique_bins = algos.unique(bins)
230230
if len(unique_bins) < len(bins) and len(bins) != 2:
231231
if duplicates == 'raise':
232-
raise ValueError("Bin edges must be unique: {}.\nYou "
232+
raise ValueError("Bin edges must be unique: {bins!r}.\nYou "
233233
"can drop duplicate edges by setting "
234-
"the 'duplicates' kwarg".format(repr(bins)))
234+
"the 'duplicates' kwarg".format(bins=bins))
235235
else:
236236
bins = unique_bins
237237

0 commit comments

Comments
 (0)