Commit bbe2196

Merge remote-tracking branch 'upstream/master' into 30966-str-validate
2 parents: 4c2416d + 2075539

22 files changed, +92 -88 lines

pandas/__init__.py (+3 -6)

@@ -35,8 +35,7 @@
     raise ImportError(
         f"C extension: {module} not built. If you want to import "
         "pandas from the source directory, you may need to run "
-        "'python setup.py build_ext --inplace --force' to build "
-        "the C extensions first."
+        "'python setup.py build_ext --inplace --force' to build the C extensions first."
     )

 from pandas._config import (

@@ -198,8 +197,7 @@ def __getattr__(name):

     warnings.warn(
         "The Panel class is removed from pandas. Accessing it "
-        "from the top-level namespace will also be removed in "
-        "the next version",
+        "from the top-level namespace will also be removed in the next version",
         FutureWarning,
         stacklevel=2,
     )

@@ -238,8 +236,7 @@ class Panel:
     elif name in {"SparseSeries", "SparseDataFrame"}:
         warnings.warn(
             f"The {name} class is removed from pandas. Accessing it from "
-            "the top-level namespace will also be removed in the next "
-            "version",
+            "the top-level namespace will also be removed in the next version",
             FutureWarning,
             stacklevel=2,
         )
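
Side note on the pattern repeated throughout this commit: Python concatenates adjacent string literals at compile time, so collapsing a wrapped message onto one line does not change the text of the raised error. A minimal sketch (not pandas code):

wrapped = (
    "'python setup.py build_ext --inplace --force' to build "
    "the C extensions first."
)
single = "'python setup.py build_ext --inplace --force' to build the C extensions first."
assert wrapped == single  # same message, only the source layout differs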

pandas/_config/config.py (+1 -2)

@@ -165,8 +165,7 @@ def _reset_option(pat, silent=False):
     raise ValueError(
         "You must specify at least 4 characters when "
         "resetting multiple keys, use the special keyword "
-        '"all" to reset all the options to their default '
-        "value"
+        '"all" to reset all the options to their default value'
     )

 for k in keys:

pandas/_libs/index.pyx (-2)

@@ -85,7 +85,6 @@ cdef class IndexEngine:
     """
     cdef:
         object loc
-        void* data_ptr

     loc = self.get_loc(key)
     if isinstance(loc, slice) or util.is_array(loc):

@@ -101,7 +100,6 @@ cdef class IndexEngine:
     """
     cdef:
         object loc
-        void* data_ptr

     loc = self.get_loc(key)
     value = convert_scalar(arr, value)

pandas/core/indexes/datetimelike.py (+4 -10)

@@ -156,13 +156,11 @@ def equals(self, other) -> bool:
     def __contains__(self, key):
         try:
             res = self.get_loc(key)
-            return (
-                is_scalar(res)
-                or isinstance(res, slice)
-                or (is_list_like(res) and len(res))
-            )
         except (KeyError, TypeError, ValueError):
             return False
+        return bool(
+            is_scalar(res) or isinstance(res, slice) or (is_list_like(res) and len(res))
+        )

     # Try to run function on index first, and then on elements of index
     # Especially important for group-by functionality

@@ -875,11 +873,7 @@ def _is_convertible_to_index_for_join(cls, other: Index) -> bool:

     def _wrap_joined_index(self, joined, other):
         name = get_op_result_name(self, other)
-        if (
-            isinstance(other, type(self))
-            and self.freq == other.freq
-            and self._can_fast_union(other)
-        ):
+        if self._can_fast_union(other):
             joined = self._shallow_copy(joined)
             joined.name = name
             return joined
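
The first hunk above narrows the try block to the get_loc call and wraps the membership test in bool(). A minimal illustration with a hypothetical container (not pandas code) of why that shape matters for __contains__:

class Lookup:
    def __init__(self, values):
        self._values = list(values)

    def __contains__(self, key):
        try:
            hits = [v for v in self._values if v == key]  # stand-in for get_loc
        except TypeError:
            return False
        # bool() guarantees a real True/False instead of e.g. a list length
        return bool(len(hits))

print(3 in Lookup([1, 2, 3]))  # True
print(9 in Lookup([1, 2, 3]))  # False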

pandas/core/ops/__init__.py (+1 -2)

@@ -664,8 +664,7 @@ def to_series(right):

     elif right.ndim > 2:
         raise ValueError(
-            "Unable to coerce to Series/DataFrame, dim "
-            f"must be <= 2: {right.shape}"
+            f"Unable to coerce to Series/DataFrame, dim must be <= 2: {right.shape}"
         )

     elif is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame)):

pandas/core/reshape/melt.py (+2 -4)

@@ -52,8 +52,7 @@ def melt(
     if not missing.empty:
         raise KeyError(
             "The following 'id_vars' are not present "
-            "in the DataFrame: {missing}"
-            "".format(missing=list(missing))
+            f"in the DataFrame: {list(missing)}"
         )
 else:
     id_vars = []

@@ -74,8 +73,7 @@ def melt(
     if not missing.empty:
         raise KeyError(
             "The following 'value_vars' are not present in "
-            "the DataFrame: {missing}"
-            "".format(missing=list(missing))
+            f"the DataFrame: {list(missing)}"
         )
     frame = frame.loc[:, id_vars + value_vars]
 else:
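
The reworded KeyError can be reproduced by melting with a column name that does not exist; a small, hedged example assuming pandas behaves as shown in the diff:

import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
try:
    pd.melt(df, id_vars=["A", "C"])  # "C" is not a column
except KeyError as err:
    print(err)
# Expected wording per the diff above:
# "The following 'id_vars' are not present in the DataFrame: ['C']"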

pandas/core/reshape/merge.py (+11 -23)

@@ -600,13 +600,11 @@ def __init__(

     if not is_bool(left_index):
         raise ValueError(
-            "left_index parameter must be of type bool, not "
-            "{left_index}".format(left_index=type(left_index))
+            f"left_index parameter must be of type bool, not {type(left_index)}"
         )
     if not is_bool(right_index):
         raise ValueError(
-            "right_index parameter must be of type bool, not "
-            "{right_index}".format(right_index=type(right_index))
+            f"right_index parameter must be of type bool, not {type(right_index)}"
         )

     # warn user when merging between different levels

@@ -1092,8 +1090,7 @@ def _maybe_coerce_merge_keys(self):
     warnings.warn(
         "You are merging on int and float "
         "columns where the float values "
-        "are not equal to their int "
-        "representation",
+        "are not equal to their int representation",
         UserWarning,
     )
     continue

@@ -1103,8 +1100,7 @@ def _maybe_coerce_merge_keys(self):
     warnings.warn(
         "You are merging on int and float "
         "columns where the float values "
-        "are not equal to their int "
-        "representation",
+        "are not equal to their int representation",
         UserWarning,
     )
     continue

@@ -1251,20 +1247,17 @@ def _validate(self, validate: str):
         )
     elif not left_unique:
         raise MergeError(
-            "Merge keys are not unique in left dataset; "
-            "not a one-to-one merge"
+            "Merge keys are not unique in left dataset; not a one-to-one merge"
         )
     elif not right_unique:
         raise MergeError(
-            "Merge keys are not unique in right dataset; "
-            "not a one-to-one merge"
+            "Merge keys are not unique in right dataset; not a one-to-one merge"
         )

 elif validate in ["one_to_many", "1:m"]:
     if not left_unique:
         raise MergeError(
-            "Merge keys are not unique in left dataset; "
-            "not a one-to-many merge"
+            "Merge keys are not unique in left dataset; not a one-to-many merge"
         )

 elif validate in ["many_to_one", "m:1"]:

@@ -1833,8 +1826,7 @@ def _left_join_on_index(left_ax: Index, right_ax: Index, join_keys, sort: bool =
     raise AssertionError(
         "If more than one join key is given then "
         "'right_ax' must be a MultiIndex and the "
-        "number of join keys must be the number of "
-        "levels in right_ax"
+        "number of join keys must be the number of levels in right_ax"
     )

     left_indexer, right_indexer = _get_multiindex_indexer(

@@ -2004,8 +1996,7 @@ def _validate_operand(obj: FrameOrSeries) -> "DataFrame":
         return obj.to_frame()
     else:
         raise TypeError(
-            "Can only merge Series or DataFrame objects, "
-            "a {obj} was passed".format(obj=type(obj))
+            f"Can only merge Series or DataFrame objects, a {type(obj)} was passed"
        )

@@ -2021,10 +2012,7 @@ def _items_overlap_with_suffix(left: Index, lsuffix, right: Index, rsuffix):
         return left, right

     if not lsuffix and not rsuffix:
-        raise ValueError(
-            "columns overlap but no suffix specified: "
-            "{rename}".format(rename=to_rename)
-        )
+        raise ValueError(f"columns overlap but no suffix specified: {to_rename}")

     def renamer(x, suffix):
         """

@@ -2043,7 +2031,7 @@ def renamer(x, suffix):
         x : renamed column name
         """
         if x in to_rename and suffix is not None:
-            return "{x}{suffix}".format(x=x, suffix=suffix)
+            return f"{x}{suffix}"
         return x

     lrenamer = partial(renamer, suffix=lsuffix)
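
For context, the reworded MergeError messages surface through merge's validate argument; a quick sketch, assuming the validate behavior of this pandas version:

import pandas as pd

left = pd.DataFrame({"key": [1, 1], "x": [10, 20]})   # duplicated merge key
right = pd.DataFrame({"key": [1, 2], "y": [30, 40]})
try:
    left.merge(right, on="key", validate="one_to_one")
except pd.errors.MergeError as err:
    print(err)
# Per the diff: "Merge keys are not unique in left dataset; not a one-to-one merge"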

pandas/core/reshape/tile.py (+1 -2)

@@ -363,8 +363,7 @@ def _bins_to_cuts(

     if duplicates not in ["raise", "drop"]:
         raise ValueError(
-            "invalid value for 'duplicates' parameter, "
-            "valid options are: raise, drop"
+            "invalid value for 'duplicates' parameter, valid options are: raise, drop"
         )

     if isinstance(bins, IntervalIndex):

pandas/core/tools/datetimes.py (+2 -5)

@@ -231,9 +231,7 @@ def _return_parsed_timezone_results(result, timezones, tz, name):
     """
     if tz is not None:
         raise ValueError(
-            "Cannot pass a tz argument when "
-            "parsing strings with timezone "
-            "information."
+            "Cannot pass a tz argument when parsing strings with timezone information."
         )
     tz_results = np.array(
         [Timestamp(res).tz_localize(zone) for res, zone in zip(result, timezones)]

@@ -817,8 +815,7 @@ def f(value):
     required = ",".join(req)
     raise ValueError(
         "to assemble mappings requires at least that "
-        f"[year, month, day] be specified: [{required}] "
-        "is missing"
+        f"[year, month, day] be specified: [{required}] is missing"
     )

     # keys we don't recognize
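
The reworded assembly error appears when to_datetime is asked to build dates from a DataFrame that is missing one of the required columns; a short sketch assuming that behavior:

import pandas as pd

parts = pd.DataFrame({"year": [2020], "month": [1]})  # no "day" column
try:
    pd.to_datetime(parts)
except ValueError as err:
    print(err)
# Per the diff: "to assemble mappings requires at least that
# [year, month, day] be specified: [day] is missing"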

pandas/core/window/common.py (+1 -2)

@@ -98,8 +98,7 @@ def _flex_binary_moment(arg1, arg2, f, pairwise=False):
     and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))
 ):
     raise TypeError(
-        "arguments to moment function must be of type "
-        "np.ndarray/Series/DataFrame"
+        "arguments to moment function must be of type np.ndarray/Series/DataFrame"
     )

 if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance(

pandas/core/window/rolling.py (+4 -7)

@@ -1820,8 +1820,7 @@ def _on(self) -> Index:
     else:
         raise ValueError(
             f"invalid on specified as {self.on}, "
-            "must be a column (of DataFrame), an Index "
-            "or None"
+            "must be a column (of DataFrame), an Index or None"
         )

     def validate(self):

@@ -1838,9 +1837,8 @@ def validate(self):
     # we don't allow center
     if self.center:
         raise NotImplementedError(
-            "center is not implemented "
-            "for datetimelike and offset "
-            "based windows"
+            "center is not implemented for "
+            "datetimelike and offset based windows"
         )

     # this will raise ValueError on non-fixed freqs

@@ -1886,8 +1884,7 @@ def _validate_freq(self):
     except (TypeError, ValueError):
         raise ValueError(
             f"passed window {self.window} is not "
-            "compatible with a datetimelike "
-            "index"
+            "compatible with a datetimelike index"
         )

     _agg_see_also_doc = dedent(
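
As of this version, center=True is rejected for offset-based (datetimelike) windows; a hedged sketch of how the reworded NotImplementedError is reached (later pandas releases may lift this restriction):

import pandas as pd

s = pd.Series(range(5), index=pd.date_range("2020-01-01", periods=5))
try:
    s.rolling("2D", center=True).mean()
except NotImplementedError as err:
    print(err)
# Per the diff: "center is not implemented for datetimelike and offset based windows"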

pandas/io/excel/_util.py (+1 -2)

@@ -136,8 +136,7 @@ def _maybe_convert_usecols(usecols):
     if is_integer(usecols):
         raise ValueError(
             "Passing an integer for `usecols` is no longer supported. "
-            "Please pass in a list of int from 0 to `usecols` "
-            "inclusive instead."
+            "Please pass in a list of int from 0 to `usecols` inclusive instead."
         )

     if isinstance(usecols, str):

pandas/io/formats/format.py (+2 -6)

@@ -737,12 +737,8 @@ def _to_str_columns(self) -> List[List[str]]:
     self.header = cast(List[str], self.header)
     if len(self.header) != len(self.columns):
         raise ValueError(
-            (
-                "Writing {ncols} cols but got {nalias} "
-                "aliases".format(
-                    ncols=len(self.columns), nalias=len(self.header)
-                )
-            )
+            f"Writing {len(self.columns)} cols "
+            f"but got {len(self.header)} aliases"
         )
     str_columns = [[label] for label in self.header]
 else:

pandas/io/formats/html.py (+2 -2)

@@ -216,8 +216,8 @@ def _write_table(self, indent: int = 0) -> None:
     self.classes = self.classes.split()
     if not isinstance(self.classes, (list, tuple)):
         raise TypeError(
-            "classes must be a string, list, or tuple, "
-            "not {typ}".format(typ=type(self.classes))
+            "classes must be a string, list, "
+            f"or tuple, not {type(self.classes)}"
         )
     _classes.extend(self.classes)

pandas/io/formats/latex.py (+1 -2)

@@ -114,8 +114,7 @@ def pad_empties(x):
     column_format = index_format + column_format
 elif not isinstance(self.column_format, str):  # pragma: no cover
     raise AssertionError(
-        "column_format must be str or unicode, "
-        "not {typ}".format(typ=type(column_format))
+        f"column_format must be str or unicode, not {type(column_format)}"
     )
 else:
     column_format = self.column_format

pandas/io/sas/sas.pyx (+3 -2)

@@ -267,8 +267,9 @@ cdef class Parser:
     elif column_types[j] == b's':
         self.column_types[j] = column_type_string
     else:
-        raise ValueError("unknown column type: "
-                         f"{self.parser.columns[j].ctype}")
+        raise ValueError(
+            f"unknown column type: {self.parser.columns[j].ctype}"
+        )

     # compression
     if parser.compression == const.rle_compression:

pandas/io/sas/sas7bdat.py (+2 -4)

@@ -459,8 +459,7 @@ def _process_columnsize_subheader(self, offset, length):
     if self.col_count_p1 + self.col_count_p2 != self.column_count:
         print(
             f"Warning: column count mismatch ({self.col_count_p1} + "
-            f"{self.col_count_p2} != "
-            f"{self.column_count})\n"
+            f"{self.col_count_p2} != {self.column_count})\n"
         )

     # Unknown purpose

@@ -672,8 +671,7 @@ def _read_next_page(self):
     self.close()
     msg = (
         "failed to read complete page from file (read "
-        f"{len(self._cached_page):d} of "
-        f"{self._page_length:d} bytes)"
+        f"{len(self._cached_page):d} of {self._page_length:d} bytes)"
     )
     raise ValueError(msg)

pandas/io/sas/sasreader.py (+1 -2)

@@ -49,8 +49,7 @@ def read_sas(
     if format is None:
         buffer_error_msg = (
             "If this is a buffer object rather "
-            "than a string name, you must specify "
-            "a format string"
+            "than a string name, you must specify a format string"
         )
         filepath_or_buffer = stringify_path(filepath_or_buffer)
         if not isinstance(filepath_or_buffer, str):
