Skip to content

Commit 8ff2ebd

Browse files
ShaharNaveh authored and WillAyd committed
STY: concat strings (#30991)
1 parent 2075539 commit 8ff2ebd

20 files changed

+47
-87
lines changed

pandas/compat/__init__.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -110,8 +110,7 @@ def _import_lzma():
110110
return lzma
111111
except ImportError:
112112
msg = (
113-
"Could not import the lzma module. "
114-
"Your installed Python is incomplete. "
113+
"Could not import the lzma module. Your installed Python is incomplete. "
115114
"Attempting to use lzma compression will result in a RuntimeError."
116115
)
117116
warnings.warn(msg)

pandas/compat/numpy/__init__.py

+2-4
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,9 @@
1818

1919
if _nlv < "1.13.3":
2020
raise ImportError(
21-
f"this version of pandas is incompatible with "
22-
f"numpy < 1.13.3\n"
21+
"this version of pandas is incompatible with numpy < 1.13.3\n"
2322
f"your numpy version is {_np_version}.\n"
24-
f"Please upgrade numpy to >= 1.13.3 to use "
25-
f"this pandas version"
23+
"Please upgrade numpy to >= 1.13.3 to use this pandas version"
2624
)
2725

2826

pandas/compat/numpy/function.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -389,9 +389,8 @@ def validate_resampler_func(method: str, args, kwargs) -> None:
389389
if len(args) + len(kwargs) > 0:
390390
if method in RESAMPLER_NUMPY_OPS:
391391
raise UnsupportedFunctionCall(
392-
f"numpy operations are not "
393-
f"valid with resample. Use "
394-
f".resample(...).{method}() instead"
392+
"numpy operations are not valid with resample. "
393+
f"Use .resample(...).{method}() instead"
395394
)
396395
else:
397396
raise TypeError("too many arguments passed in")

pandas/core/generic.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -1701,8 +1701,7 @@ def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
17011701
multi_message = (
17021702
"\n"
17031703
"For a multi-index, the label must be a "
1704-
"tuple with elements corresponding to "
1705-
"each level."
1704+
"tuple with elements corresponding to each level."
17061705
)
17071706
else:
17081707
multi_message = ""

pandas/core/reshape/concat.py

+3-8
Original file line numberDiff line numberDiff line change
@@ -305,8 +305,7 @@ def __init__(
305305
if isinstance(objs, (NDFrame, str)):
306306
raise TypeError(
307307
"first argument must be an iterable of pandas "
308-
"objects, you passed an object of type "
309-
'"{name}"'.format(name=type(objs).__name__)
308+
f'objects, you passed an object of type "{type(objs).__name__}"'
310309
)
311310

312311
if join == "outer":
@@ -577,10 +576,7 @@ def _maybe_check_integrity(self, concat_index: Index):
577576
if self.verify_integrity:
578577
if not concat_index.is_unique:
579578
overlap = concat_index[concat_index.duplicated()].unique()
580-
raise ValueError(
581-
"Indexes have overlapping values: "
582-
"{overlap!s}".format(overlap=overlap)
583-
)
579+
raise ValueError(f"Indexes have overlapping values: {overlap}")
584580

585581

586582
def _concat_indexes(indexes) -> Index:
@@ -648,8 +644,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde
648644
# make sure that all of the passed indices have the same nlevels
649645
if not len({idx.nlevels for idx in indexes}) == 1:
650646
raise AssertionError(
651-
"Cannot concat indices that do "
652-
"not have the same number of levels"
647+
"Cannot concat indices that do not have the same number of levels"
653648
)
654649

655650
# also copies

pandas/core/reshape/merge.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -1071,9 +1071,8 @@ def _maybe_coerce_merge_keys(self):
10711071
continue
10721072

10731073
msg = (
1074-
"You are trying to merge on {lk_dtype} and "
1075-
"{rk_dtype} columns. If you wish to proceed "
1076-
"you should use pd.concat".format(lk_dtype=lk.dtype, rk_dtype=rk.dtype)
1074+
f"You are trying to merge on {lk.dtype} and "
1075+
f"{rk.dtype} columns. If you wish to proceed you should use pd.concat"
10771076
)
10781077

10791078
# if we are numeric, then allow differing

pandas/io/clipboards.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -69,8 +69,7 @@ def read_clipboard(sep=r"\s+", **kwargs): # pragma: no cover
6969
kwargs["engine"] = "python"
7070
elif len(sep) > 1 and kwargs.get("engine") == "c":
7171
warnings.warn(
72-
"read_clipboard with regex separator does not work "
73-
"properly with c engine"
72+
"read_clipboard with regex separator does not work properly with c engine"
7473
)
7574

7675
return read_csv(StringIO(text), sep=sep, **kwargs)

pandas/io/common.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -78,8 +78,7 @@ def _expand_user(
7878
def validate_header_arg(header) -> None:
7979
if isinstance(header, bool):
8080
raise TypeError(
81-
"Passing a bool to header is invalid. "
82-
"Use header=None for no header or "
81+
"Passing a bool to header is invalid. Use header=None for no header or "
8382
"header=int or list-like of ints to specify "
8483
"the row(s) making up the column names"
8584
)

pandas/io/date_converters.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -57,8 +57,7 @@ def _check_columns(cols):
5757
for i, n in enumerate(map(len, tail)):
5858
if n != N:
5959
raise AssertionError(
60-
f"All columns must have the same length: {N}; "
61-
f"column {i} has length {n}"
60+
f"All columns must have the same length: {N}; column {i} has length {n}"
6261
)
6362

6463
return N

pandas/io/feather_format.py

+3-6
Original file line numberDiff line numberDiff line change
@@ -37,16 +37,13 @@ def to_feather(df: DataFrame, path):
3737
typ = type(df.index)
3838
raise ValueError(
3939
f"feather does not support serializing {typ} "
40-
"for the index; you can .reset_index() "
41-
"to make the index into column(s)"
40+
"for the index; you can .reset_index() to make the index into column(s)"
4241
)
4342

4443
if not df.index.equals(RangeIndex.from_range(range(len(df)))):
4544
raise ValueError(
46-
"feather does not support serializing a "
47-
"non-default index for the index; you "
48-
"can .reset_index() to make the index "
49-
"into column(s)"
45+
"feather does not support serializing a non-default index for the index; "
46+
"you can .reset_index() to make the index into column(s)"
5047
)
5148

5249
if df.index.name is not None:

pandas/io/html.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -899,8 +899,7 @@ def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs):
899899
f"The flavor {flav} failed to parse your input. "
900900
"Since you passed a non-rewindable file "
901901
"object, we can't rewind it to try "
902-
"another parser. Try read_html() with a "
903-
"different flavor."
902+
"another parser. Try read_html() with a different flavor."
904903
)
905904

906905
retained = caught

pandas/io/json/_normalize.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -317,8 +317,7 @@ def _recursive_extract(data, path, seen_meta, level=0):
317317
meta_val = np.nan
318318
else:
319319
raise KeyError(
320-
"Try running with "
321-
"errors='ignore' as key "
320+
"Try running with errors='ignore' as key "
322321
f"{e} is not always present"
323322
)
324323
meta_vals[key].append(meta_val)

pandas/io/parquet.py

+2-4
Original file line numberDiff line numberDiff line change
@@ -32,8 +32,7 @@ def get_engine(engine: str) -> "BaseImpl":
3232
raise ImportError(
3333
"Unable to find a usable engine; "
3434
"tried using: 'pyarrow', 'fastparquet'.\n"
35-
"pyarrow or fastparquet is required for parquet "
36-
"support"
35+
"pyarrow or fastparquet is required for parquet support"
3736
)
3837

3938
if engine == "pyarrow":
@@ -156,8 +155,7 @@ def write(
156155
if "partition_on" in kwargs and partition_cols is not None:
157156
raise ValueError(
158157
"Cannot use both partition_on and "
159-
"partition_cols. Use partition_cols for "
160-
"partitioning data"
158+
"partition_cols. Use partition_cols for partitioning data"
161159
)
162160
elif "partition_on" in kwargs:
163161
partition_cols = kwargs.pop("partition_on")

pandas/io/parsers.py

+9-17
Original file line numberDiff line numberDiff line change
@@ -612,8 +612,7 @@ def parser_f(
612612
if delim_whitespace and delimiter != default_sep:
613613
raise ValueError(
614614
"Specified a delimiter with both sep and "
615-
"delim_whitespace=True; you can only "
616-
"specify one."
615+
"delim_whitespace=True; you can only specify one."
617616
)
618617

619618
if engine is not None:
@@ -968,8 +967,7 @@ def _clean_options(self, options, engine):
968967
fallback_reason = (
969968
"the 'c' engine does not support "
970969
"regex separators (separators > 1 char and "
971-
r"different from '\s+' are "
972-
"interpreted as regex)"
970+
r"different from '\s+' are interpreted as regex)"
973971
)
974972
engine = "python"
975973
elif delim_whitespace:
@@ -1000,8 +998,7 @@ def _clean_options(self, options, engine):
1000998
fallback_reason = (
1001999
"ord(quotechar) > 127, meaning the "
10021000
"quotechar is larger than one byte, "
1003-
"and the 'c' engine does not support "
1004-
"such quotechars"
1001+
"and the 'c' engine does not support such quotechars"
10051002
)
10061003
engine = "python"
10071004

@@ -1119,9 +1116,8 @@ def _make_engine(self, engine="c"):
11191116
klass = FixedWidthFieldParser
11201117
else:
11211118
raise ValueError(
1122-
f"Unknown engine: {engine} (valid options are "
1123-
'"c", "python", or '
1124-
'"python-fwf")'
1119+
f"Unknown engine: {engine} (valid options "
1120+
'are "c", "python", or "python-fwf")'
11251121
)
11261122
self._engine = klass(self.f, **self.options)
11271123

@@ -1230,8 +1226,7 @@ def _validate_usecols_names(usecols, names):
12301226
missing = [c for c in usecols if c not in names]
12311227
if len(missing) > 0:
12321228
raise ValueError(
1233-
"Usecols do not match columns, "
1234-
f"columns expected but not found: {missing}"
1229+
f"Usecols do not match columns, columns expected but not found: {missing}"
12351230
)
12361231

12371232
return usecols
@@ -1325,8 +1320,7 @@ def _validate_parse_dates_arg(parse_dates):
13251320
that is the case.
13261321
"""
13271322
msg = (
1328-
"Only booleans, lists, and "
1329-
"dictionaries are accepted "
1323+
"Only booleans, lists, and dictionaries are accepted "
13301324
"for the 'parse_dates' parameter"
13311325
)
13321326

@@ -1680,8 +1674,7 @@ def _convert_to_ndarrays(
16801674
warnings.warn(
16811675
(
16821676
"Both a converter and dtype were specified "
1683-
f"for column {c} - only the converter will "
1684-
"be used"
1677+
f"for column {c} - only the converter will be used"
16851678
),
16861679
ParserWarning,
16871680
stacklevel=7,
@@ -1826,8 +1819,7 @@ def _cast_types(self, values, cast_type, column):
18261819
except NotImplementedError:
18271820
raise NotImplementedError(
18281821
f"Extension Array: {array_type} must implement "
1829-
"_from_sequence_of_strings in order "
1830-
"to be used in parser methods"
1822+
"_from_sequence_of_strings in order to be used in parser methods"
18311823
)
18321824

18331825
else:

pandas/io/pytables.py

+7-12
Original file line numberDiff line numberDiff line change
@@ -413,8 +413,8 @@ def read_hdf(
413413
for group_to_check in groups[1:]:
414414
if not _is_metadata_of(group_to_check, candidate_only_group):
415415
raise ValueError(
416-
"key must be provided when HDF5 file "
417-
"contains multiple datasets."
416+
"key must be provided when HDF5 "
417+
"file contains multiple datasets."
418418
)
419419
key = candidate_only_group._v_pathname
420420
return store.select(
@@ -1240,8 +1240,7 @@ def append_to_multiple(
12401240
if v is None:
12411241
if remain_key is not None:
12421242
raise ValueError(
1243-
"append_to_multiple can only have one value in d that "
1244-
"is None"
1243+
"append_to_multiple can only have one value in d that is None"
12451244
)
12461245
remain_key = k
12471246
else:
@@ -2313,8 +2312,7 @@ def validate_attr(self, append):
23132312
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
23142313
if existing_dtype is not None and existing_dtype != self.dtype:
23152314
raise ValueError(
2316-
"appended items dtype do not match existing "
2317-
"items dtype in table!"
2315+
"appended items dtype do not match existing items dtype in table!"
23182316
)
23192317

23202318
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
@@ -2680,14 +2678,12 @@ def validate_read(self, columns, where):
26802678
if columns is not None:
26812679
raise TypeError(
26822680
"cannot pass a column specification when reading "
2683-
"a Fixed format store. this store must be "
2684-
"selected in its entirety"
2681+
"a Fixed format store. this store must be selected in its entirety"
26852682
)
26862683
if where is not None:
26872684
raise TypeError(
26882685
"cannot pass a where specification when reading "
2689-
"from a Fixed format store. this store must be "
2690-
"selected in its entirety"
2686+
"from a Fixed format store. this store must be selected in its entirety"
26912687
)
26922688

26932689
@property
@@ -2908,8 +2904,7 @@ def write_array(self, key: str, value: ArrayLike, items: Optional[Index] = None)
29082904

29092905
if is_categorical_dtype(value):
29102906
raise NotImplementedError(
2911-
"Cannot store a category dtype in "
2912-
"a HDF5 dataset that uses format="
2907+
"Cannot store a category dtype in a HDF5 dataset that uses format="
29132908
'"fixed". Use format="table".'
29142909
)
29152910
if not empty_array:

pandas/io/sql.py

+3-6
Original file line numberDiff line numberDiff line change
@@ -977,8 +977,7 @@ def _sqlalchemy_type(self, col):
977977
if col_type == "timedelta64":
978978
warnings.warn(
979979
"the 'timedelta' type is not supported, and will be "
980-
"written as integer values (ns frequency) to the "
981-
"database.",
980+
"written as integer values (ns frequency) to the database.",
982981
UserWarning,
983982
stacklevel=8,
984983
)
@@ -1413,8 +1412,7 @@ def _get_valid_sqlite_name(name):
14131412

14141413
_SAFE_NAMES_WARNING = (
14151414
"The spaces in these column names will not be changed. "
1416-
"In pandas versions < 0.14, spaces were converted to "
1417-
"underscores."
1415+
"In pandas versions < 0.14, spaces were converted to underscores."
14181416
)
14191417

14201418

@@ -1528,8 +1526,7 @@ def _sql_type_name(self, col):
15281526
if col_type == "timedelta64":
15291527
warnings.warn(
15301528
"the 'timedelta' type is not supported, and will be "
1531-
"written as integer values (ns frequency) to the "
1532-
"database.",
1529+
"written as integer values (ns frequency) to the database.",
15331530
UserWarning,
15341531
stacklevel=8,
15351532
)

pandas/io/stata.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -639,8 +639,7 @@ def __init__(self, catarray, encoding="latin-1"):
639639
if self.text_len > 32000:
640640
raise ValueError(
641641
"Stata value labels for a single variable must "
642-
"have a combined length less than 32,000 "
643-
"characters."
642+
"have a combined length less than 32,000 characters."
644643
)
645644

646645
# Ensure int32
@@ -1729,9 +1728,10 @@ def _do_select_columns(self, data, columns):
17291728
raise ValueError("columns contains duplicate entries")
17301729
unmatched = column_set.difference(data.columns)
17311730
if unmatched:
1731+
joined = ", ".join(list(unmatched))
17321732
raise ValueError(
1733-
"The following columns were not found in the "
1734-
"Stata data set: " + ", ".join(list(unmatched))
1733+
"The following columns were not "
1734+
f"found in the Stata data set: {joined}"
17351735
)
17361736
# Copy information for retained columns for later processing
17371737
dtyplist = []

pandas/plotting/_matplotlib/converter.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -421,8 +421,7 @@ def __call__(self):
421421
if estimate > self.MAXTICKS * 2:
422422
raise RuntimeError(
423423
"MillisecondLocator estimated to generate "
424-
f"{estimate:d} ticks from {dmin} to {dmax}: "
425-
"exceeds Locator.MAXTICKS"
424+
f"{estimate:d} ticks from {dmin} to {dmax}: exceeds Locator.MAXTICKS"
426425
f"* 2 ({self.MAXTICKS * 2:d}) "
427426
)
428427

pandas/plotting/_matplotlib/hist.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -318,8 +318,7 @@ def hist_series(
318318
if "figure" in kwds:
319319
raise ValueError(
320320
"Cannot pass 'figure' when using the "
321-
"'by' argument, since a new 'Figure' instance "
322-
"will be created"
321+
"'by' argument, since a new 'Figure' instance will be created"
323322
)
324323
axes = _grouped_hist(
325324
self,

0 commit comments

Comments (0)