@@ -1464,9 +1464,9 @@ def extract(r):
1464
1464
for n in range (len (columns [0 ])):
1465
1465
if all (compat .to_str (c [n ]) in self .unnamed_cols for c in columns ):
1466
1466
raise ParserError (
1467
- "Passed header=[%s ] are too many rows for this "
1467
+ "Passed header=[{header} ] are too many rows for this "
1468
1468
"multi_index of columns"
1469
- % ',' .join (str (x ) for x in self .header )
1469
+ . format ( header = ',' .join (str (x ) for x in self .header ) )
1470
1470
)
1471
1471
1472
1472
# Clean the column names (if we have an index_col).
@@ -1499,9 +1499,10 @@ def _maybe_dedup_names(self, names):
1499
1499
counts [col ] = cur_count + 1
1500
1500
1501
1501
if is_potential_mi :
1502
- col = col [:- 1 ] + ('%s.%d' % (col [- 1 ], cur_count ),)
1502
+ col = col [:- 1 ] + ('{col}.{cnt}' .format (
1503
+ col = col [- 1 ], cnt = cur_count ),)
1503
1504
else :
1504
- col = '%s.%d' % (col , cur_count )
1505
+ col = '{col}.{cnt}' . format (col = col , cnt = cur_count )
1505
1506
cur_count = counts [col ]
1506
1507
1507
1508
names [i ] = col
@@ -1548,7 +1549,7 @@ def _get_simple_index(self, data, columns):
1548
1549
def ix (col ):
1549
1550
if not isinstance (col , compat .string_types ):
1550
1551
return col
1551
- raise ValueError ('Index %s invalid' % col )
1552
+ raise ValueError ('Index {col} invalid' . format ( col = col ) )
1552
1553
1553
1554
to_remove = []
1554
1555
index = []
@@ -1572,8 +1573,8 @@ def _get_name(icol):
1572
1573
return icol
1573
1574
1574
1575
if col_names is None :
1575
- raise ValueError (('Must supply column order to use %s as '
1576
- 'index' ) % str ( icol ))
1576
+ raise ValueError (('Must supply column order to use {icol!s} '
1577
+ 'as index' ). format ( icol = icol ))
1577
1578
1578
1579
for i , c in enumerate (col_names ):
1579
1580
if i == icol :
@@ -1688,7 +1689,8 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
1688
1689
1689
1690
result [c ] = cvals
1690
1691
if verbose and na_count :
1691
- print ('Filled %d NA values in column %s' % (na_count , str (c )))
1692
+ print ('Filled {count} NA values in column {c!s}' .format (
1693
+ count = na_count , c = c ))
1692
1694
return result
1693
1695
1694
1696
def _infer_types (self , values , na_values , try_num_bool = True ):
@@ -1789,8 +1791,9 @@ def _cast_types(self, values, cast_type, column):
1789
1791
values = astype_nansafe (values , cast_type ,
1790
1792
copy = True , skipna = True )
1791
1793
except ValueError :
1792
- raise ValueError ("Unable to convert column %s to "
1793
- "type %s" % (column , cast_type ))
1794
+ raise ValueError (
1795
+ "Unable to convert column {column} to type {type}" .format (
1796
+ column = column , type = cast_type ))
1794
1797
return values
1795
1798
1796
1799
def _do_date_conversions (self , names , data ):
@@ -1853,7 +1856,7 @@ def __init__(self, src, **kwds):
1853
1856
1854
1857
if self .names is None :
1855
1858
if self .prefix :
1856
- self .names = ['%s%d' % ( self .prefix , i )
1859
+ self .names = ['{prefix}{i}' . format ( prefix = self .prefix , i = i )
1857
1860
for i in range (self ._reader .table_width )]
1858
1861
else :
1859
1862
self .names = lrange (self ._reader .table_width )
@@ -2255,10 +2258,11 @@ def __init__(self, f, **kwds):
2255
2258
raise ValueError ('Only length-1 decimal markers supported' )
2256
2259
2257
2260
if self .thousands is None :
2258
- self .nonnum = re .compile ('[^-^0-9^%s]+' % self .decimal )
2261
+ self .nonnum = re .compile (
2262
+ '[^-^0-9^{decimal}]+' .format (decimal = self .decimal ))
2259
2263
else :
2260
- self .nonnum = re .compile ('[^-^0-9^%s^%s ]+' % ( self . thousands ,
2261
- self .decimal ))
2264
+ self .nonnum = re .compile ('[^-^0-9^{thousands}^{decimal} ]+' . format (
2265
+ thousands = self . thousands , decimal = self .decimal ))
2262
2266
2263
2267
def _set_no_thousands_columns (self ):
2264
2268
# Create a set of column ids that are not to be stripped of thousands
@@ -2497,8 +2501,8 @@ def _infer_columns(self):
2497
2501
except StopIteration :
2498
2502
if self .line_pos < hr :
2499
2503
raise ValueError (
2500
- 'Passed header=%s but only %d lines in file '
2501
- % (hr , self .line_pos + 1 ))
2504
+ 'Passed header={hr} but only {pos} lines in '
2505
+ 'file' . format (hr = hr , pos = ( self .line_pos + 1 ) ))
2502
2506
2503
2507
# We have an empty file, so check
2504
2508
# if columns are provided. That will
@@ -2539,7 +2543,8 @@ def _infer_columns(self):
2539
2543
2540
2544
while cur_count > 0 :
2541
2545
counts [col ] = cur_count + 1
2542
- col = "%s.%d" % (col , cur_count )
2546
+ col = "{columns}.{count}" .format (
2547
+ columns = col , count = cur_count )
2543
2548
cur_count = counts [col ]
2544
2549
2545
2550
this_columns [i ] = col
@@ -2607,8 +2612,8 @@ def _infer_columns(self):
2607
2612
2608
2613
if not names :
2609
2614
if self .prefix :
2610
- columns = [['%s%d' % ( self . prefix , i )
2611
- for i in range (ncols )]]
2615
+ columns = [['{prefix}{idx}' . format (
2616
+ prefix = self . prefix , idx = i ) for i in range (ncols )]]
2612
2617
else :
2613
2618
columns = [lrange (ncols )]
2614
2619
columns = self ._handle_usecols (columns , columns [0 ])
@@ -3035,8 +3040,9 @@ def _rows_to_cols(self, content):
3035
3040
content .append (l )
3036
3041
3037
3042
for row_num , actual_len in bad_lines :
3038
- msg = ('Expected %d fields in line %d, saw %d' %
3039
- (col_len , row_num + 1 , actual_len ))
3043
+ msg = ('Expected {col_len} fields in line {line}, saw '
3044
+ '{length}' .format (col_len = col_len , line = (row_num + 1 ),
3045
+ length = actual_len ))
3040
3046
if (self .delimiter and
3041
3047
len (self .delimiter ) > 1 and
3042
3048
self .quoting != csv .QUOTE_NONE ):
@@ -3207,8 +3213,9 @@ def _isindex(colspec):
3207
3213
new_name , col , old_names = _try_convert_dates (
3208
3214
converter , colspec , data_dict , orig_names )
3209
3215
if new_name in data_dict :
3210
- raise ValueError ('New date column already in dict %s' %
3211
- new_name )
3216
+ raise ValueError (
3217
+ 'New date column already in dict {name}' .format (
3218
+ name = new_name ))
3212
3219
new_data [new_name ] = col
3213
3220
new_cols .append (new_name )
3214
3221
date_cols .update (old_names )
@@ -3217,8 +3224,8 @@ def _isindex(colspec):
3217
3224
# dict of new name to column list
3218
3225
for new_name , colspec in compat .iteritems (parse_spec ):
3219
3226
if new_name in data_dict :
3220
- raise ValueError ('Date column %s already in dict' %
3221
- new_name )
3227
+ raise ValueError (
3228
+ 'Date column {name} already in dict' . format ( name = new_name ) )
3222
3229
3223
3230
_ , col , old_names = _try_convert_dates (converter , colspec ,
3224
3231
data_dict , orig_names )
@@ -3397,7 +3404,7 @@ def _stringify_na_values(na_values):
3397
3404
# we are like 999 here
3398
3405
if v == int (v ):
3399
3406
v = int (v )
3400
- result .append ("%s .0" % v )
3407
+ result .append ("{value} .0" . format ( value = v ) )
3401
3408
result .append (str (v ))
3402
3409
3403
3410
result .append (v )
@@ -3542,8 +3549,8 @@ def get_rows(self, infer_nrows, skiprows=None):
3542
3549
3543
3550
def detect_colspecs (self , infer_nrows = 100 , skiprows = None ):
3544
3551
# Regex escape the delimiters
3545
- delimiters = '' .join (r'\%s' % x for x in self .delimiter )
3546
- pattern = re .compile ('([^%s ]+)' % delimiters )
3552
+ delimiters = '' .join (r'\{}' . format ( x ) for x in self .delimiter )
3553
+ pattern = re .compile ('([^{} ]+)' . format ( delimiters ) )
3547
3554
rows = self .get_rows (infer_nrows , skiprows )
3548
3555
if not rows :
3549
3556
raise EmptyDataError ("No rows from which to infer column width" )
0 commit comments