STYLE/PERF: replace string concatenations with f-strings in core #49518

Merged
merged 1 commit on Nov 7, 2022
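The change is mechanical throughout pandas/core: string pieces joined with "+" become a single f-string. A minimal sketch of the before/after shape and a rough timeit comparison (the names below are illustrative, not taken from the diff; exact timings vary by interpreter and build):

import timeit

name = "sum"

before = "nan" + name      # old spelling: explicit concatenation
after = f"nan{name}"       # new spelling: one f-string
assert before == after == "nansum"

# f-strings are typically at least as fast as small concatenations in CPython
print(timeit.timeit('"nan" + name', globals={"name": "sum"}))
print(timeit.timeit('f"nan{name}"', globals={"name": "sum"}))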
4 changes: 2 additions & 2 deletions pandas/core/arrays/categorical.py
@@ -1882,7 +1882,7 @@ def _repr_categories_info(self) -> str:
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
linesep = f"{sep.rstrip()}\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
@@ -1893,7 +1893,7 @@ def _repr_categories_info(self) -> str:
levstring += val
start = False
# replace to simple save space by
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
return f"{levheader}[{levstring.replace(' < ... < ', ' ... ')}]"

def _repr_footer(self) -> str:
info = self._repr_categories_info()
2 changes: 1 addition & 1 deletion pandas/core/arrays/masked.py
@@ -1058,7 +1058,7 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
data = self.to_numpy("float64", na_value=np.nan)

# median, var, std, skew, kurt, idxmin, idxmax
op = getattr(nanops, "nan" + name)
op = getattr(nanops, f"nan{name}")
result = op(data, axis=0, skipna=skipna, mask=mask, **kwargs)

if np.isnan(result):
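The concatenation removed here builds a lookup key for getattr-based dispatch onto pandas' private nanops module. A self-contained sketch of the same dispatch pattern, written against NumPy's nan-aware reductions instead of nanops (np.nansum and friends are real NumPy functions; no pandas internals are imported here):

import numpy as np

data = np.array([1.0, np.nan, 3.0])

for name in ("sum", "mean", "max"):
    # mirrors op = getattr(nanops, f"nan{name}") in masked.py
    op = getattr(np, f"nan{name}")
    print(name, op(data))  # the nan* variants skip the NaN entry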
8 changes: 4 additions & 4 deletions pandas/core/arrays/string_arrow.py
@@ -317,11 +317,11 @@ def _str_contains(
return result

def _str_startswith(self, pat: str, na=None):
pat = "^" + re.escape(pat)
pat = f"^{re.escape(pat)}"
return self._str_contains(pat, na=na, regex=True)

def _str_endswith(self, pat: str, na=None):
pat = re.escape(pat) + "$"
pat = f"{re.escape(pat)}$"
return self._str_contains(pat, na=na, regex=True)

def _str_replace(
@@ -345,14 +345,14 @@ def _str_match(
self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None
):
if not pat.startswith("^"):
pat = "^" + pat
pat = f"^{pat}"
return self._str_contains(pat, case, flags, na, regex=True)

def _str_fullmatch(
self, pat, case: bool = True, flags: int = 0, na: Scalar | None = None
):
if not pat.endswith("$") or pat.endswith("//$"):
pat = pat + "$"
pat = f"{pat}$"
return self._str_match(pat, case, flags, na)

def _str_isalnum(self):
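The string_arrow helpers turn a literal into an anchored regular expression before handing it to _str_contains. A standalone sketch of that anchoring with the stdlib re module (plain Python strings here, not the Arrow-backed arrays):

import re

pat = "a.b"  # literal prefix/suffix; the dot must not act as a regex wildcard

startswith_pat = f"^{re.escape(pat)}"   # as in _str_startswith
endswith_pat = f"{re.escape(pat)}$"     # as in _str_endswith

print(bool(re.search(startswith_pat, "a.b-tail")))   # True
print(bool(re.search(startswith_pat, "head-a.b")))   # False
print(bool(re.search(endswith_pat, "head-a.b")))     # True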
2 changes: 1 addition & 1 deletion pandas/core/computation/expr.py
@@ -410,7 +410,7 @@ def visit(self, node, **kwargs):
e.msg = "Python keyword not valid identifier in numexpr query"
raise e

method = "visit_" + type(node).__name__
method = f"visit_{type(node).__name__}"
visitor = getattr(self, method)
return visitor(node, **kwargs)

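The visit dispatch builds a method name from the AST node's class and looks it up on the visitor. A short sketch of how that name is formed for a simple expression (stdlib ast only; the numexpr-specific visitor methods are not reproduced):

import ast

node = ast.parse("a + b", mode="eval").body
method = f"visit_{type(node).__name__}"
print(method)  # visit_BinOp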
2 changes: 1 addition & 1 deletion pandas/core/computation/parsing.py
@@ -59,7 +59,7 @@ def create_valid_python_identifier(name: str) -> str:
)

name = "".join([special_characters_replacements.get(char, char) for char in name])
name = "BACKTICK_QUOTED_STRING_" + name
name = f"BACKTICK_QUOTED_STRING_{name}"

if not name.isidentifier():
raise SyntaxError(f"Could not convert '{name}' to a valid Python identifier.")
2 changes: 1 addition & 1 deletion pandas/core/computation/scope.py
@@ -250,7 +250,7 @@ def _get_vars(self, stack, scopes: list[str]) -> None:
variables = itertools.product(scopes, stack)
for scope, (frame, _, _, _, _, _) in variables:
try:
d = getattr(frame, "f_" + scope)
d = getattr(frame, f"f_{scope}")
self.scope = DeepChainMap(self.scope.new_child(d))
finally:
# won't remove it, but DECREF it
4 changes: 2 additions & 2 deletions pandas/core/dtypes/dtypes.py
@@ -802,7 +802,7 @@ def __hash__(self) -> int:
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
if other.startswith("M8["):
other = "datetime64[" + other[3:]
other = f"datetime64[{other[3:]}"
return other == self.name

return (
@@ -1132,7 +1132,7 @@ def __new__(cls, subtype=None, closed: str_type | None = None):
)
raise TypeError(msg)

key = str(subtype) + str(closed)
key = f"{subtype}{closed}"
try:
return cls._cache_dtypes[key]
except KeyError:
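In the IntervalDtype cache key the explicit str() calls disappear because f-string interpolation goes through format(), which for most objects falls back to str(). A tiny check with a plausible subtype (np.dtype is just an example value, not taken from the diff):

import numpy as np

subtype = np.dtype("int64")
closed = "right"
assert f"{subtype}{closed}" == str(subtype) + str(closed) == "int64right"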
5 changes: 2 additions & 3 deletions pandas/core/frame.py
@@ -1095,7 +1095,7 @@ def _repr_html_(self) -> str | None:
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace("<", r"&lt;", 1)
val = val.replace(">", r"&gt;", 1)
return "<pre>" + val + "</pre>"
return f"<pre>{val}</pre>"

if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
@@ -8845,8 +8845,7 @@ def explode(
if not self.columns.is_unique:
duplicate_cols = self.columns[self.columns.duplicated()].tolist()
raise ValueError(
"DataFrame columns must be unique. "
+ f"Duplicate columns: {duplicate_cols}"
f"DataFrame columns must be unique. Duplicate columns: {duplicate_cols}"
)

columns: list[Hashable]
4 changes: 2 additions & 2 deletions pandas/core/groupby/groupby.py
@@ -1533,9 +1533,9 @@ def f(g):
with np.errstate(all="ignore"):
return func(g, *args, **kwargs)

elif hasattr(nanops, "nan" + func):
elif hasattr(nanops, f"nan{func}"):
# TODO: should we wrap this in to e.g. _is_builtin_func?
f = getattr(nanops, "nan" + func)
f = getattr(nanops, f"nan{func}")

else:
raise ValueError(
2 changes: 1 addition & 1 deletion pandas/core/indexes/category.py
@@ -337,7 +337,7 @@ def _format_attrs(self):
attrs = [
(
"categories",
"[" + ", ".join(self._data._repr_categories()) + "]",
f"[{', '.join(self._data._repr_categories())}]",
),
("ordered", self.ordered),
]
2 changes: 1 addition & 1 deletion pandas/core/indexes/interval.py
@@ -845,7 +845,7 @@ def _format_native_types(
def _format_data(self, name=None) -> str:
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
return self._data._format_data() + "," + self._format_space()
return f"{self._data._format_data()},{self._format_space()}"

# --------------------------------------------------------------------
# Set Operations
2 changes: 1 addition & 1 deletion pandas/core/interchange/column.py
@@ -329,7 +329,7 @@ def _get_validity_buffer(self) -> tuple[PandasBuffer, Any]:
return buffer, dtype

try:
msg = _NO_VALIDITY_BUFFER[null] + " so does not have a separate mask"
msg = f"{_NO_VALIDITY_BUFFER[null]} so does not have a separate mask"
except KeyError:
# TODO: implement for other bit/byte masks?
raise NotImplementedError("See self.describe_null")
2 changes: 1 addition & 1 deletion pandas/core/nanops.py
@@ -1043,7 +1043,7 @@ def nansem(


def _nanminmax(meth, fill_value_typ):
@bottleneck_switch(name="nan" + meth)
@bottleneck_switch(name=f"nan{meth}")
@_datetimelike_compat
def reduction(
values: np.ndarray,
4 changes: 2 additions & 2 deletions pandas/core/ops/docstrings.py
@@ -25,11 +25,11 @@ def make_flex_doc(op_name: str, typ: str) -> str:
op_desc_op = op_desc["op"]
assert op_desc_op is not None # for mypy
if op_name.startswith("r"):
equiv = "other " + op_desc_op + " " + typ
equiv = f"other {op_desc_op} {typ}"
elif op_name == "divmod":
equiv = f"{op_name}({typ}, other)"
else:
equiv = typ + " " + op_desc_op + " other"
equiv = f"{typ} {op_desc_op} other"

if typ == "series":
base_doc = _flex_doc_SERIES
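For reference, the equiv strings this branch produces, assuming the op description for the add family supplies "+" (the mapping that populates op_desc is not part of this diff):

typ = "series"
op_desc_op = "+"
for op_name in ("add", "radd", "divmod"):
    if op_name.startswith("r"):
        equiv = f"other {op_desc_op} {typ}"   # radd -> "other + series"
    elif op_name == "divmod":
        equiv = f"{op_name}({typ}, other)"    # divmod -> "divmod(series, other)"
    else:
        equiv = f"{typ} {op_desc_op} other"   # add -> "series + other"
    print(op_name, "->", equiv)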