Skip to content

CLN: replace %s syntax with .format in io #17660

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Sep 25, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pandas/io/clipboard/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,5 +8,5 @@ class PyperclipException(RuntimeError):
class PyperclipWindowsException(PyperclipException):
    """Pyperclip error augmented with the current Windows API error.

    Appends the readable form of ``ctypes.WinError()`` (the OSError built
    from ``GetLastError()``) to *message* before delegating to the parent
    exception, so callers see both the pyperclip context and the OS cause.
    """

    def __init__(self, message):
        # ctypes.WinError() resolves the thread's last Windows error code
        # into an OSError with a human-readable message.
        message += " ({err})".format(err=ctypes.WinError())
        super(PyperclipWindowsException, self).__init__(message)
72 changes: 43 additions & 29 deletions pandas/io/excel.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ def register_writer(klass):
if ext.startswith('.'):
ext = ext[1:]
if ext not in _writer_extensions:
config.register_option("io.excel.%s.writer" % ext,
config.register_option("io.excel.{ext}.writer".format(ext=ext),
engine_name, validator=str)
_writer_extensions.append(ext)

Expand All @@ -190,7 +190,8 @@ def get_writer(engine_name):
try:
return _writers[engine_name]
except KeyError:
raise ValueError("No Excel writer '%s'" % engine_name)
raise ValueError("No Excel writer '{engine}'"
.format(engine=engine_name))


@Appender(_read_excel_doc)
Expand Down Expand Up @@ -259,7 +260,7 @@ def __init__(self, io, **kwds):
engine = kwds.pop('engine', None)

if engine is not None and engine != 'xlrd':
raise ValueError("Unknown engine: %s" % engine)
raise ValueError("Unknown engine: {engine}".format(engine=engine))

# If io is a url, want to keep the data as bytes so can't pass
# to get_filepath_or_buffer()
Expand Down Expand Up @@ -445,7 +446,7 @@ def _parse_cell(cell_contents, cell_typ):

for asheetname in sheets:
if verbose:
print("Reading sheet %s" % asheetname)
print("Reading sheet {sheet}".format(sheet=asheetname))

if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
Expand Down Expand Up @@ -634,7 +635,7 @@ def _conv_value(val):
elif is_bool(val):
val = bool(val)
elif isinstance(val, Period):
val = "%s" % val
val = "{val}".format(val=val)
elif is_list_like(val):
val = str(val)

Expand Down Expand Up @@ -697,9 +698,11 @@ def __new__(cls, path, engine=None, **kwargs):
ext = 'xlsx'

try:
engine = config.get_option('io.excel.%s.writer' % ext)
engine = config.get_option('io.excel.{ext}.writer'
.format(ext=ext))
except KeyError:
error = ValueError("No engine for filetype: '%s'" % ext)
error = ValueError("No engine for filetype: '{ext}'"
.format(ext=ext))
raise error
cls = get_writer(engine)

Expand Down Expand Up @@ -787,8 +790,9 @@ def check_extension(cls, ext):
if ext.startswith('.'):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
msg = (u("Invalid extension for engine '%s': '%s'") %
(pprint_thing(cls.engine), pprint_thing(ext)))
msg = (u("Invalid extension for engine '{engine}': '{ext}'")
.format(engine=pprint_thing(cls.engine),
ext=pprint_thing(ext)))
raise ValueError(msg)
else:
return True
Expand All @@ -813,8 +817,8 @@ class _Openpyxl1Writer(ExcelWriter):
def __init__(self, path, engine=None, **engine_kwargs):
if not openpyxl_compat.is_compat(major_ver=self.openpyxl_majorver):
raise ValueError('Installed openpyxl is not supported at this '
'time. Use {0}.x.y.'
.format(self.openpyxl_majorver))
'time. Use {majorver}.x.y.'
.format(majorver=self.openpyxl_majorver))
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook

Expand Down Expand Up @@ -854,7 +858,8 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,

for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
xcell = wks.cell("{col}{row}".format(col=colletter,
row=startrow + cell.row + 1))
if (isinstance(cell.val, compat.string_types) and
xcell.data_type_for_value(cell.val) != xcell.TYPE_STRING):
xcell.set_value_explicit(cell.val)
Expand All @@ -876,10 +881,12 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
cletterstart = get_column_letter(startcol + cell.col + 1)
cletterend = get_column_letter(startcol + cell.mergeend + 1)

wks.merge_cells('%s%s:%s%s' % (cletterstart,
startrow + cell.row + 1,
cletterend,
startrow + cell.mergestart + 1))
wks.merge_cells('{start}{row}:{end}{mergestart}'
.format(start=cletterstart,
row=startrow + cell.row + 1,
end=cletterend,
mergestart=startrow +
cell.mergestart + 1))

# Excel requires that the format of the first cell in a merged
# range is repeated in the rest of the merged range.
Expand All @@ -895,7 +902,8 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
# Ignore first cell. It is already handled.
continue
colletter = get_column_letter(col)
xcell = wks.cell("%s%s" % (colletter, row))
xcell = wks.cell("{col}{row}"
.format(col=colletter, row=row))
for field in style.__fields__:
xcell.style.__setattr__(
field, style.__getattribute__(field))
Expand Down Expand Up @@ -955,7 +963,8 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,

for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
xcell = wks["%s%s" % (colletter, startrow + cell.row + 1)]
xcell = wks["{col}{row}"
.format(col=colletter, row=startrow + cell.row + 1)]
xcell.value = _conv_value(cell.val)
style_kwargs = {}

Expand All @@ -977,10 +986,12 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
cletterstart = get_column_letter(startcol + cell.col + 1)
cletterend = get_column_letter(startcol + cell.mergeend + 1)

wks.merge_cells('%s%s:%s%s' % (cletterstart,
startrow + cell.row + 1,
cletterend,
startrow + cell.mergestart + 1))
wks.merge_cells('{start}{row}:{end}{mergestart}'
.format(start=cletterstart,
row=startrow + cell.row + 1,
end=cletterend,
mergestart=startrow +
cell.mergestart + 1))

# Excel requires that the format of the first cell in a merged
# range is repeated in the rest of the merged range.
Expand All @@ -996,7 +1007,8 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
# Ignore first cell. It is already handled.
continue
colletter = get_column_letter(col)
xcell = wks["%s%s" % (colletter, row)]
xcell = wks["{col}{row}"
.format(col=colletter, row=row)]
xcell.style = xcell.style.copy(**style_kwargs)

@classmethod
Expand Down Expand Up @@ -1030,7 +1042,7 @@ def _convert_to_style_kwargs(cls, style_dict):
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
_conv_to_x = getattr(cls, '_convert_to_{0}'.format(k),
_conv_to_x = getattr(cls, '_convert_to_{k}'.format(k=k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
Expand Down Expand Up @@ -1505,17 +1517,19 @@ def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',',
"""
if hasattr(item, 'items'):
if firstlevel:
it = ["%s: %s" % (key, cls._style_to_xlwt(value, False))
it = ["{key}: {val}"
.format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "%s " % (line_sep).join(it)
out = "{sep} ".format(sep=(line_sep).join(it))
return out
else:
it = ["%s %s" % (key, cls._style_to_xlwt(value, False))
it = ["{key} {val}"
.format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "%s " % (field_sep).join(it)
out = "{sep} ".format(sep=(field_sep).join(it))
return out
else:
item = "%s" % item
item = "{item}".format(item=item)
item = item.replace("True", "on")
item = item.replace("False", "off")
return item
Expand Down
46 changes: 26 additions & 20 deletions pandas/io/html.py
Original file line number Diff line number Diff line change
Expand Up @@ -439,14 +439,15 @@ def _parse_tables(self, doc, match, attrs):
unique_tables.add(table)

if not result:
raise ValueError("No tables found matching pattern %r" %
match.pattern)
raise ValueError("No tables found matching pattern {patt!r}"
.format(patt=match.pattern))
return result

def _setup_build_doc(self):
    """Read the raw text of ``self.io``, failing fast when it is empty.

    Returns
    -------
    raw_text : str
        The document contents as returned by ``_read``.

    Raises
    ------
    ValueError
        If no text could be read from the document.
    """
    raw_text = _read(self.io)
    if not raw_text:
        raise ValueError('No text parsed from document: {doc}'
                         .format(doc=self.io))
    return raw_text

def _build_doc(self):
Expand All @@ -473,8 +474,8 @@ def _build_xpath_expr(attrs):
if 'class_' in attrs:
attrs['class'] = attrs.pop('class_')

s = [u("@%s=%r") % (k, v) for k, v in iteritems(attrs)]
return u('[%s]') % ' and '.join(s)
s = [u("@{key}={val!r}").format(key=k, val=v) for k, v in iteritems(attrs)]
return u('[{expr}]').format(expr=' and '.join(s))


_re_namespace = {'re': 'http://exslt.org/regular-expressions'}
Expand Down Expand Up @@ -517,8 +518,8 @@ def _parse_tables(self, doc, match, kwargs):

# 1. check all descendants for the given pattern and only search tables
# 2. go up the tree until we find a table
query = '//table//*[re:test(text(), %r)]/ancestor::table'
xpath_expr = u(query) % pattern
query = '//table//*[re:test(text(), {patt!r})]/ancestor::table'
xpath_expr = u(query).format(patt=pattern)

# if any table attributes were given build an xpath expression to
# search for them
Expand All @@ -528,7 +529,8 @@ def _parse_tables(self, doc, match, kwargs):
tables = doc.xpath(xpath_expr, namespaces=_re_namespace)

if not tables:
raise ValueError("No tables found matching regex %r" % pattern)
raise ValueError("No tables found matching regex {patt!r}"
.format(patt=pattern))
return tables

def _build_doc(self):
Expand Down Expand Up @@ -574,8 +576,9 @@ def _build_doc(self):
scheme = parse_url(self.io).scheme
if scheme not in _valid_schemes:
# lxml can't parse it
msg = ('%r is not a valid url scheme, valid schemes are '
'%s') % (scheme, _valid_schemes)
msg = (('{invalid!r} is not a valid url scheme, valid '
'schemes are {valid}')
.format(invalid=scheme, valid=_valid_schemes))
raise ValueError(msg)
else:
# something else happened: maybe a faulty connection
Expand Down Expand Up @@ -670,8 +673,9 @@ def _parser_dispatch(flavor):
"""
valid_parsers = list(_valid_parsers.keys())
if flavor not in valid_parsers:
raise ValueError('%r is not a valid flavor, valid flavors are %s' %
(flavor, valid_parsers))
raise ValueError('{invalid!r} is not a valid flavor, valid flavors '
'are {valid}'
.format(invalid=flavor, valid=valid_parsers))

if flavor in ('bs4', 'html5lib'):
if not _HAS_HTML5LIB:
Expand All @@ -695,7 +699,7 @@ def _parser_dispatch(flavor):


def _print_as_set(s):
    """Render iterable *s* as a brace-delimited set literal string.

    BUG FIX: the template must be ``'{{{arg}}}'`` — the outer ``{{``/``}}``
    are literal braces and the inner ``{arg}`` is the replacement field.
    The previous ``'{{arg}}'`` escaped ALL braces, so it returned the
    literal text ``{arg}`` and silently dropped the set contents
    (the pre-.format code, ``'{%s}' % ...``, wrapped the elements in
    braces as intended).
    """
    return '{{{arg}}}'.format(arg=', '.join([pprint_thing(el) for el in s]))


def _validate_flavor(flavor):
Expand All @@ -705,21 +709,23 @@ def _validate_flavor(flavor):
flavor = flavor,
elif isinstance(flavor, collections.Iterable):
if not all(isinstance(flav, string_types) for flav in flavor):
raise TypeError('Object of type %r is not an iterable of strings' %
type(flavor).__name__)
raise TypeError('Object of type {typ!r} is not an iterable of '
'strings'
.format(typ=type(flavor).__name__))
else:
fmt = '{0!r}' if isinstance(flavor, string_types) else '{0}'
fmt = '{flavor!r}' if isinstance(flavor, string_types) else '{flavor}'
fmt += ' is not a valid flavor'
raise ValueError(fmt.format(flavor))
raise ValueError(fmt.format(flavor=flavor))

flavor = tuple(flavor)
valid_flavors = set(_valid_parsers)
flavor_set = set(flavor)

if not flavor_set & valid_flavors:
raise ValueError('%s is not a valid set of flavors, valid flavors are '
'%s' % (_print_as_set(flavor_set),
_print_as_set(valid_flavors)))
raise ValueError('{invalid} is not a valid set of flavors, valid '
'flavors are {valid}'
.format(invalid=_print_as_set(flavor_set),
valid=_print_as_set(valid_flavors)))
return flavor


Expand Down
23 changes: 12 additions & 11 deletions pandas/io/json/json.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ class SeriesWriter(Writer):
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == 'index':
raise ValueError("Series index must be unique for orient="
"'%s'" % self.orient)
"'{orient}'".format(orient=self.orient))


class FrameWriter(Writer):
Expand All @@ -110,11 +110,11 @@ def _format_axes(self):
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
"'%s'." % self.orient)
"'{orient}'.".format(orient=self.orient))
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'%s'." % self.orient)
"'{orient}'.".format(orient=self.orient))


class JSONTableWriter(FrameWriter):
Expand All @@ -134,8 +134,9 @@ def __init__(self, obj, orient, date_format, double_precision,

if date_format != 'iso':
msg = ("Trying to write with `orient='table'` and "
"`date_format='%s'`. Table Schema requires dates "
"to be formatted with `date_format='iso'`" % date_format)
"`date_format='{fmt}'`. Table Schema requires dates "
"to be formatted with `date_format='iso'`"
.format(fmt=date_format))
raise ValueError(msg)

self.schema = build_table_schema(obj)
Expand Down Expand Up @@ -166,8 +167,8 @@ def __init__(self, obj, orient, date_format, double_precision,

def write(self):
    """Serialize to JSON, embedding the Table Schema alongside the data.

    Returns the string ``{"schema": <schema>, "data": <data>}`` where
    ``<data>`` is the parent writer's JSON output and ``<schema>`` is the
    JSON-dumped table schema built in ``__init__``. Note ``{{``/``}}``
    in the template are literal braces, not replacement fields.
    """
    data = super(JSONTableWriter, self).write()
    serialized = '{{"schema": {schema}, "data": {data}}}'.format(
        schema=dumps(self.schema), data=data)
    return serialized


Expand Down Expand Up @@ -391,8 +392,8 @@ def __init__(self, json, orient, dtype=True, convert_axes=True,
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
raise ValueError('date_unit must be one of %s' %
(self._STAMP_UNITS,))
raise ValueError('date_unit must be one of {units}'
.format(units=self._STAMP_UNITS))
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS['s']
def check_keys_split(self, decoded):
    """Check that *decoded* contains only keys valid for orient='split'.

    Parameters
    ----------
    decoded : dict
        The parsed JSON payload.

    Raises
    ------
    ValueError
        If any key is not in ``self._split_keys``.
    """
    bad_keys = set(decoded.keys()).difference(set(self._split_keys))
    if bad_keys:
        bad_keys = ", ".join(bad_keys)
        raise ValueError(u("JSON data had unexpected key(s): {bad_keys}")
                         .format(bad_keys=pprint_thing(bad_keys)))

def parse(self):

Expand Down
Loading