Replace "foo!r" to "repr(foo)" syntax #29886 #30502

Merged: 5 commits, Dec 27, 2019
Changes from 2 commits
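The diff below is part of the f-string conversion tracked in #29886: `str.format()` and `%`-interpolation calls are rewritten as f-strings, and, per the PR title, an explicit `repr(foo)` is preferred over the `!r` conversion inside replacement fields. A minimal sketch of the equivalences involved, with made-up values for illustration:

    dispcmd = "git describe --tags"  # illustrative value, not from the diff
    # %-interpolation, str.format(), and an f-string all render the same text
    assert (
        "unable to run %s" % dispcmd
        == "unable to run {dispcmd}".format(dispcmd=dispcmd)
        == f"unable to run {dispcmd}"
    )

    # the title's point: spell out repr() instead of the !r conversion
    foo = "bar"
    assert f"{foo!r}" == f"{repr(foo)}" == "'bar'"
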
52 changes: 24 additions & 28 deletions pandas/_version.py
@@ -79,17 +79,17 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run {dispcmd}".format(dispcmd=dispcmd))
print(f"unable to run {dispcmd}")
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
print(f"unable to find command, tried {commands}")
return None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run {dispcmd} (error)".format(dispcmd=dispcmd))
print(f"unable to run {dispcmd} (error)")
return None
return stdout

@@ -101,10 +101,8 @@ def versions_from_parentdir(parentdir_prefix, root, verbose):
if not dirname.startswith(parentdir_prefix):
if verbose:
print(
"guessing rootdir is '{root}', but '{dirname}' "
"doesn't start with prefix '{parentdir_prefix}'".format(
root=root, dirname=dirname, parentdir_prefix=parentdir_prefix
)
f"guessing rootdir is '{root}', but '{dirname}' "
f"doesn't start with prefix '{parentdir_prefix}'"
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {
@@ -163,15 +161,15 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '{}', no digits".format(",".join(refs - tags)))
print(f"discarding '{','.join(refs - tags)}', no digits")
if verbose:
print("likely tags: {}".format(",".join(sorted(tags))))
print(f"likely tags: {','.join(sorted(tags))}")
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking {r}".format(r=r))
print(f"picking {r}")
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
@@ -198,7 +196,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):

if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in {root}".format(root=root))
print(f"no .git in {root}")
raise NotThisMethod("no .git directory")

GITS = ["git"]
@@ -241,16 +239,14 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = (
"unable to parse git-describe output: "
"'{describe_out}'".format(describe_out=describe_out)
"unable to parse git-describe output: " f"'{describe_out}'"
)
return pieces

# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
fmt = "tag '{full_tag}' doesn't start with prefix '{tag_prefix}'"
msg = fmt.format(full_tag=full_tag, tag_prefix=tag_prefix)
msg = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'"
if verbose:
print(msg)
pieces["error"] = msg
@@ -291,12 +287,12 @@ def render_pep440(pieces):
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "{:d}.g{}".format(pieces["distance"], pieces["short"])
rendered += f"{pieces['distance']:d}.g{pieces['short']}"
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.{:d}.g{}".format(pieces["distance"], pieces["short"])
rendered = f"0+untagged.{pieces['distance']:d}.g{pieces['short']}"
if pieces["dirty"]:
rendered += ".dirty"
return rendered
@@ -311,10 +307,10 @@ def render_pep440_pre(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
rendered += f".post.dev{pieces['distance']}"
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
rendered = f"0.post.dev{pieces['distance']}"
return rendered


@@ -330,17 +326,17 @@ def render_pep440_post(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post{:d}".format(pieces["distance"])
rendered += f".post{pieces['distance']:d}"
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g{}".format(pieces["short"])
rendered += f"g{pieces['short']}"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
rendered = f"0.pos{pieces['distance']:d}"
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g{}".format(pieces["short"])
rendered += f"+g{pieces['short']}"
return rendered


@@ -353,12 +349,12 @@ def render_pep440_old(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
rendered += f".post{pieces['distance']}"
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
rendered = f"0.post{pieces['distance']}"
if pieces["dirty"]:
rendered += ".dev0"
return rendered
@@ -374,7 +370,7 @@ def render_git_describe(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"])
rendered += f"-{pieces['distance']:d}-g{pieces['short']}"
else:
# exception #1
rendered = pieces["short"]
@@ -392,7 +388,7 @@ def render_git_describe_long(pieces):

if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"])
rendered += f"-{pieces['distance']:d}-g{pieces['short']}"
else:
# exception #1
rendered = pieces["short"]
@@ -426,7 +422,7 @@ def render(pieces, style):
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '{style}'".format(style=style))
raise ValueError(f"unknown style '{style}'")

return {
"version": rendered,
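A recurring pattern in `_version.py` above: a positional `.format()` with a `:d` spec becomes a subscripted f-string expression, with the format spec carried over after the colon. A quick sanity check of the equivalence, using invented values:

    pieces = {"distance": 3, "short": "1a2b3c"}  # illustrative values
    old = "{:d}.g{}".format(pieces["distance"], pieces["short"])
    # single-quoted keys inside the double-quoted f-string avoid a quote clash
    new = f"{pieces['distance']:d}.g{pieces['short']}"
    assert old == new == "3.g1a2b3c"
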
2 changes: 1 addition & 1 deletion pandas/io/formats/style.py
@@ -255,7 +255,7 @@ def _translate(self):
BLANK_VALUE = ""

def format_attr(pair):
return "{key}={value}".format(**pair)
return f"{pair['key']}={pair['value']}"

# for sparsifying a MultiIndex
idx_lengths = _get_level_lengths(self.index)
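The `format_attr` change is one spot where the conversion is not purely mechanical: the old `"{key}={value}".format(**pair)` relied on keyword unpacking, so the f-string has to subscript the dict explicitly. A check with a made-up pair:

    pair = {"key": "class", "value": "col_heading"}  # illustrative values
    old = "{key}={value}".format(**pair)    # keyword unpacking
    new = f"{pair['key']}={pair['value']}"  # explicit subscripts
    assert old == new == "class=col_heading"
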
2 changes: 1 addition & 1 deletion pandas/tests/indexes/period/test_period.py
@@ -126,7 +126,7 @@ def test_difference_freq(self, sort):

def test_hash_error(self):
index = period_range("20010101", periods=10)
msg = "unhashable type: '{}'".format(type(index).__name__)
msg = f"unhashable type: '{type(index).__name__}'"
with pytest.raises(TypeError, match=msg):
hash(index)

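f-string replacement fields accept arbitrary expressions, so `type(index).__name__` can move inline as-is. A sketch with a hypothetical stand-in class:

    class Stub:
        __hash__ = None  # unhashable by construction, like a pandas Index

    msg = f"unhashable type: '{type(Stub()).__name__}'"
    assert msg == "unhashable type: 'Stub'"
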
46 changes: 16 additions & 30 deletions pandas/tests/io/test_sql.py
@@ -215,9 +215,7 @@ def teardown_method(self, method):
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(
"DROP TABLE IF EXISTS {}".format(sql._get_valid_mysql_name(table_name))
)
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()

def _get_all_tables(self):
@@ -237,7 +235,7 @@ def _close_conn(self):
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
"DROP TABLE IF EXISTS {}".format(sql._get_valid_sqlite_name(table_name))
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()

@@ -405,11 +403,7 @@ def _load_raw_sql(self):
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(
"SELECT count(*) AS count_1 FROM {table_name}".format(
table_name=table_name
)
)
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
@@ -1207,7 +1201,7 @@ def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError("Column {column} not found".format(column=column))
raise ValueError(f"Column {column} not found")

def test_sqlite_type_mapping(self):

@@ -1272,7 +1266,7 @@ def setup_connect(self):
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip("Can't connect to {0} server".format(self.flavor))
pytest.skip(f"Can't connect to {self.flavor} server")

def test_read_sql(self):
self._read_sql_iris()
@@ -1414,7 +1408,7 @@ def check(col):

else:
raise AssertionError(
"DateCol loaded with incorrect type -> {0}".format(col.dtype)
f"DateCol loaded with incorrect type -> {col.dtype}"
)

# GH11216
@@ -2051,15 +2045,13 @@ def psql_insert_copy(table, conn, keys, data_iter):
writer.writerows(data_iter)
s_buf.seek(0)

columns = ", ".join('"{}"'.format(k) for k in keys)
columns = ", ".join(f'"{k}"' for k in keys)
if table.schema:
table_name = "{}.{}".format(table.schema, table.name)
table_name = f"{table.schema}.{table.name}"
else:
table_name = table.name

sql_query = "COPY {} ({}) FROM STDIN WITH CSV".format(
table_name, columns
)
sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV"
cur.copy_expert(sql=sql_query, file=s_buf)

expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
@@ -2199,14 +2191,12 @@ def test_datetime_time(self):
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' "
+ "AND tbl_name = '{tbl_name}'".format(tbl_name=tbl_name),
+ f"AND tbl_name = '{tbl_name}'",
self.conn,
)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(
"PRAGMA index_info({ix_name})".format(ix_name=ix_name), self.conn
)
ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols

@@ -2217,15 +2207,11 @@ def test_transactions(self):
self._transaction_test()

def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute("PRAGMA table_info({table})".format(table=table))
recs = self.conn.execute(f"PRAGMA table_info({table})")
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError(
"Table {table}, column {column} not found".format(
table=table, column=column
)
)
raise ValueError(f"Table {table}, column {column} not found")

def test_dtype(self):
if self.flavor == "mysql":
@@ -2295,7 +2281,7 @@ def test_illegal_names(self):
sql.table_exists(weird_name, self.conn)

df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name])
c_tbl = "test_weird_col_name{ndx:d}".format(ndx=ndx)
c_tbl = f"test_weird_col_name{ndx:d}"
df2.to_sql(c_tbl, self.conn)
sql.table_exists(c_tbl, self.conn)

@@ -2500,7 +2486,7 @@ def test_if_exists(self):
df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]})
df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]})
table_name = "table_if_exists"
sql_select = "SELECT * FROM {table_name}".format(table_name=table_name)
sql_select = f"SELECT * FROM {table_name}"

def clean_up(test_table_to_drop):
"""
@@ -2788,7 +2774,7 @@ def test_if_exists(self):
df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]})
df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]})
table_name = "table_if_exists"
sql_select = "SELECT * FROM {table_name}".format(table_name=table_name)
sql_select = f"SELECT * FROM {table_name}"

def clean_up(test_table_to_drop):
"""
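Several of the SQL-building lines above nest quotes, e.g. `f'"{k}"'`. Before Python 3.12 an f-string expression could not reuse the enclosing quote character, so alternating quote styles was the standard workaround. A sketch with invented column names:

    keys = ["col1", "col2"]  # illustrative values
    # single-quoted f-string, double-quote literals around each identifier
    columns = ", ".join(f'"{k}"' for k in keys)
    assert columns == '"col1", "col2"'
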
6 changes: 3 additions & 3 deletions pandas/tests/scalar/timedelta/test_timedelta.py
@@ -399,7 +399,7 @@ def test_unit_parser(self, units, np_unit, wrapper):
[np.timedelta64(i, "m") for i in np.arange(5).tolist()]
)

str_repr = ["{}{}".format(x, unit) for x in np.arange(5)]
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(str_repr))
@@ -416,9 +416,9 @@ def test_unit_parser(self, units, np_unit, wrapper):
if unit == "M":
expected = Timedelta(np.timedelta64(2, "m").astype("timedelta64[ns]"))

result = to_timedelta("2{}".format(unit))
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta("2{}".format(unit))
result = Timedelta(f"2{unit}")
assert result == expected

@pytest.mark.parametrize("unit", ["Y", "y", "M"])
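f-strings drop into comprehensions like any other expression, which is what the `str_repr` change relies on. A minimal version using plain `range` in place of `np.arange`:

    unit = "s"  # illustrative value
    str_repr = [f"{x}{unit}" for x in range(3)]
    assert str_repr == ["0s", "1s", "2s"]
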
8 changes: 4 additions & 4 deletions pandas/tests/series/test_missing.py
@@ -275,7 +275,7 @@ def test_datetime64_tz_fillna(self):
["2011-01-01 10:00", pd.NaT, "2011-01-03 10:00", pd.NaT], tz=tz
)
s = pd.Series(idx)
assert s.dtype == "datetime64[ns, {0}]".format(tz)
assert s.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(pd.isna(s), null_loc)

result = s.fillna(pd.Timestamp("2011-01-02 10:00"))
@@ -1284,7 +1284,7 @@ def test_interpolate_invalid_float_limit(self, nontemporal_method):
def test_interp_invalid_method(self, invalid_method):
s = Series([1, 3, np.nan, 12, np.nan, 25])

msg = "method must be one of.* Got '{}' instead".format(invalid_method)
msg = f"method must be one of.* Got '{invalid_method}' instead"
with pytest.raises(ValueError, match=msg):
s.interpolate(method=invalid_method)

@@ -1608,9 +1608,9 @@ def test_interp_non_timedelta_index(self, interp_methods_ind, ind):
else:
expected_error = (
"Index column must be numeric or datetime type when "
"using {method} method other than linear. "
f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating.".format(method=method)
"interpolating."
)
with pytest.raises(ValueError, match=expected_error):
df[0].interpolate(method=method, **kwargs)
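The `expected_error` change above shows that adjacent string literals still concatenate implicitly when only some of them are f-strings; the `f` prefix is needed only on the piece containing a replacement field. A sketch with an invented method name:

    method = "krogh"  # hypothetical value
    expected_error = (
        "Index column must be numeric or datetime type when "
        f"using {method} method other than linear. "  # only this piece needs f
        "Try setting a numeric or datetime index column before "
        "interpolating."
    )
    assert "using krogh method" in expected_error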