Skip to content

STY: Removed unconcatenated strings #30464

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 7 commits into from
Dec 25, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 1 addition & 3 deletions pandas/core/arrays/datetimelike.py
Original file line number Diff line number Diff line change
Expand Up @@ -916,9 +916,7 @@ def _is_unique(self):

def _add_datetimelike_scalar(self, other):
    # Overridden by TimedeltaArray
raise TypeError(
f"cannot add {type(self).__name__} and " f"{type(other).__name__}"
)
raise TypeError(f"cannot add {type(self).__name__} and {type(other).__name__}")

_add_datetime_arraylike = _add_datetimelike_scalar

Expand Down
8 changes: 2 additions & 6 deletions pandas/core/dtypes/cast.py
Original file line number Diff line number Diff line change
Expand Up @@ -820,9 +820,7 @@ def astype_nansafe(arr, dtype, copy: bool = True, skipna: bool = False):
if dtype.kind == "M":
return arr.astype(dtype)

raise TypeError(
f"cannot astype a datetimelike from [{arr.dtype}] " f"to [{dtype}]"
)
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")

elif is_timedelta64_dtype(arr):
if is_object_dtype(dtype):
Expand All @@ -842,9 +840,7 @@ def astype_nansafe(arr, dtype, copy: bool = True, skipna: bool = False):
elif dtype == _TD_DTYPE:
return arr.astype(_TD_DTYPE, copy=copy)

raise TypeError(
f"cannot astype a timedelta from [{arr.dtype}] " f"to [{dtype}]"
)
raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]")

elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):

Expand Down
2 changes: 1 addition & 1 deletion pandas/io/json/_json.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ def to_json(

if not index and orient not in ["split", "table"]:
raise ValueError(
"'index=False' is only valid when 'orient' is " "'split' or 'table'"
"'index=False' is only valid when 'orient' is 'split' or 'table'"
)

path_or_buf = stringify_path(path_or_buf)
Expand Down
8 changes: 3 additions & 5 deletions pandas/io/json/_table_schema.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,9 +81,7 @@ def set_default_names(data):
if len(nms) == 1 and data.index.name == "index":
warnings.warn("Index name of 'index' is not round-trippable")
elif len(nms) > 1 and any(x.startswith("level_") for x in nms):
warnings.warn(
"Index names beginning with 'level_' are not " "round-trippable"
)
warnings.warn("Index names beginning with 'level_' are not round-trippable")
return data

data = data.copy()
Expand Down Expand Up @@ -317,12 +315,12 @@ def parse_table_schema(json, precise_float):

# Cannot directly use as_type with timezone data on object; raise for now
if any(str(x).startswith("datetime64[ns, ") for x in dtypes.values()):
raise NotImplementedError('table="orient" can not yet read timezone ' "data")
raise NotImplementedError('table="orient" can not yet read timezone data')

# No ISO constructor for Timedelta as of yet, so need to raise
if "timedelta64" in dtypes.values():
raise NotImplementedError(
'table="orient" can not yet read ' "ISO-formatted Timedelta data"
'table="orient" can not yet read ISO-formatted Timedelta data'
)

df = df.astype(dtypes)
Expand Down
6 changes: 2 additions & 4 deletions pandas/tests/arithmetic/test_period.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,9 +168,7 @@ def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):

# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
rev_msg = r"Input has different freq=(M|2M|3M) from PeriodArray\(freq=A-DEC\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
Expand All @@ -184,7 +182,7 @@ def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
Period("2011", freq="4M") >= base

idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
rev_msg = r"Input has different freq=(M|2M|3M) from PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
Expand Down
5 changes: 2 additions & 3 deletions pandas/tests/arrays/test_timedeltas.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,13 +41,12 @@ def test_other_type_raises(self):
def test_incorrect_dtype_raises(self):
# TODO: why TypeError for 'category' but ValueError for i8?
with pytest.raises(
ValueError, match=r"category cannot be converted " r"to timedelta64\[ns\]"
ValueError, match=r"category cannot be converted to timedelta64\[ns\]"
):
TimedeltaArray(np.array([1, 2, 3], dtype="i8"), dtype="category")

with pytest.raises(
ValueError,
match=r"dtype int64 cannot be converted " r"to timedelta64\[ns\]",
ValueError, match=r"dtype int64 cannot be converted to timedelta64\[ns\]",
):
TimedeltaArray(np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("int64"))

Expand Down
4 changes: 1 addition & 3 deletions pandas/tests/base/test_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -698,9 +698,7 @@ def test_duplicated_drop_duplicates_index(self):

with pytest.raises(
TypeError,
match=(
r"drop_duplicates\(\) got an " r"unexpected keyword argument"
),
match=r"drop_duplicates\(\) got an unexpected keyword argument",
):
idx.drop_duplicates(inplace=True)

Expand Down
4 changes: 2 additions & 2 deletions pandas/tests/frame/test_constructors.py
Original file line number Diff line number Diff line change
Expand Up @@ -479,11 +479,11 @@ def test_constructor_error_msgs(self):
DataFrame(np.zeros((3, 3, 3)), columns=["A", "B", "C"], index=[1])

# wrong size axis labels
msg = "Shape of passed values " r"is \(2, 3\), indices " r"imply \(1, 3\)"
msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=["A", "B", "C"], index=[1])

msg = "Shape of passed values " r"is \(2, 3\), indices " r"imply \(2, 2\)"
msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=["A", "B"], index=[1, 2])

Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/frame/test_missing.py
Original file line number Diff line number Diff line change
Expand Up @@ -662,7 +662,7 @@ def test_fillna_invalid_method(self, float_frame):

def test_fillna_invalid_value(self, float_frame):
# list
msg = '"value" parameter must be a scalar or dict, but you passed' ' a "{}"'
msg = '"value" parameter must be a scalar or dict, but you passed a "{}"'
with pytest.raises(TypeError, match=msg.format("list")):
float_frame.fillna([1, 2])
# tuple
Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/indexes/datetimes/test_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -1298,7 +1298,7 @@ def test_dataframe(self, cache):
tm.assert_series_equal(result, expected)

# extra columns
msg = "extra keys have been passed to the datetime assemblage: " r"\[foo\]"
msg = r"extra keys have been passed to the datetime assemblage: \[foo\]"
with pytest.raises(ValueError, match=msg):
df2 = df.copy()
df2["foo"] = 1
Expand Down
4 changes: 1 addition & 3 deletions pandas/tests/indexing/test_loc.py
Original file line number Diff line number Diff line change
Expand Up @@ -242,9 +242,7 @@ def test_loc_to_fail(self):
with pytest.raises(KeyError, match=msg):
s.loc[[-1, -2]]

msg = (
r"\"None of \[Index\(\['4'\], dtype='object'\)\] are" r" in the \[index\]\""
)
msg = r"\"None of \[Index\(\['4'\], dtype='object'\)\] are in the \[index\]\""
with pytest.raises(KeyError, match=msg):
s.loc[["4"]]

Expand Down
10 changes: 3 additions & 7 deletions pandas/tests/io/formats/test_to_csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -376,16 +376,14 @@ def test_to_csv_string_with_lf(self):
assert f.read() == expected_noarg
with tm.ensure_clean("lf_test.csv") as path:
# case 2: LF as line terminator
expected_lf = b"int,str_lf\n" b"1,abc\n" b'2,"d\nef"\n' b'3,"g\nh\n\ni"\n'
expected_lf = b'int,str_lf\n1,abc\n2,"d\nef"\n3,"g\nh\n\ni"\n'
df.to_csv(path, line_terminator="\n", index=False)
with open(path, "rb") as f:
assert f.read() == expected_lf
with tm.ensure_clean("lf_test.csv") as path:
# case 3: CRLF as line terminator
# 'line_terminator' should not change inner element
expected_crlf = (
b"int,str_lf\r\n" b"1,abc\r\n" b'2,"d\nef"\r\n' b'3,"g\nh\n\ni"\r\n'
)
expected_crlf = b'int,str_lf\r\n1,abc\r\n2,"d\nef"\r\n3,"g\nh\n\ni"\r\n'
df.to_csv(path, line_terminator="\r\n", index=False)
with open(path, "rb") as f:
assert f.read() == expected_crlf
Expand All @@ -412,9 +410,7 @@ def test_to_csv_string_with_crlf(self):
assert f.read() == expected_noarg
with tm.ensure_clean("crlf_test.csv") as path:
# case 2: LF as line terminator
expected_lf = (
b"int,str_crlf\n" b"1,abc\n" b'2,"d\r\nef"\n' b'3,"g\r\nh\r\n\r\ni"\n'
)
expected_lf = b'int,str_crlf\n1,abc\n2,"d\r\nef"\n3,"g\r\nh\r\n\r\ni"\n'
df.to_csv(path, line_terminator="\n", index=False)
with open(path, "rb") as f:
assert f.read() == expected_lf
Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/io/json/test_pandas.py
Original file line number Diff line number Diff line change
Expand Up @@ -1244,7 +1244,7 @@ def test_to_jsonl(self):
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n' '{"a\\\\":"foo\\"","b":"bar"}'
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}'
assert result == expected
tm.assert_frame_equal(pd.read_json(result, lines=True), df)

Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/io/json/test_readlines.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def test_to_jsonl():
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n' '{"a\\\\":"foo\\"","b":"bar"}'
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}'
assert result == expected
tm.assert_frame_equal(read_json(result, lines=True), df)

Expand Down
6 changes: 3 additions & 3 deletions pandas/tests/io/json/test_ujson.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,9 +111,9 @@ def test_encode_decimal(self):
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n ' '\\r \\t <\\/script> &"'
not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t <\\/script> &"'
html_encoded = (
'"A string \\\\ \\/ \\b \\f \\n \\r \\t ' '\\u003c\\/script\\u003e \\u0026"'
'"A string \\\\ \\/ \\b \\f \\n \\r \\t \\u003c\\/script\\u003e \\u0026"'
)

def helper(expected_output, **encode_kwargs):
Expand Down Expand Up @@ -816,7 +816,7 @@ def test_array_numpy_labelled(self):

# see gh-10837: write out the dump explicitly
# so there is no dependency on iteration order
input_dumps = '[{"a": 42, "b":31}, {"a": 24, "c": 99}, ' '{"a": 2.4, "b": 78}]'
input_dumps = '[{"a": 42, "b":31}, {"a": 24, "c": 99}, {"a": 2.4, "b": 78}]'
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
Expand Down
5 changes: 2 additions & 3 deletions pandas/tests/io/parser/test_common.py
Original file line number Diff line number Diff line change
Expand Up @@ -1144,9 +1144,8 @@ def test_escapechar(all_parsers):
StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
)

assert result["SEARCH_TERM"][2] == (
'SLAGBORD, "Bergslagen", ' "IKEA:s 1700-tals serie"
)
assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie'

tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))


Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/io/parser/test_textreader.py
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,7 @@ def test_header_not_enough_lines(self):
assert_array_dicts_equal(recs, expected)

def test_escapechar(self):
data = '\\"hello world"\n' '\\"hello world"\n' '\\"hello world"'
data = '\\"hello world"\n\\"hello world"\n\\"hello world"'

reader = TextReader(StringIO(data), delimiter=",", header=None, escapechar="\\")
result = reader.read()
Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/io/pytables/test_store.py
Original file line number Diff line number Diff line change
Expand Up @@ -3214,7 +3214,7 @@ def test_frame_select_complex(self, setup_path):
tm.assert_frame_equal(result, expected)

result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
"df", '(index>df.index[3] & index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
Expand Down
8 changes: 2 additions & 6 deletions pandas/tests/io/test_common.py
Original file line number Diff line number Diff line change
Expand Up @@ -142,9 +142,7 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext):

path = os.path.join(HERE, "data", "does_not_exist." + fn_ext)
msg1 = r"File (b')?.+does_not_exist\.{}'? does not exist".format(fn_ext)
msg2 = (
r"\[Errno 2\] No such file or directory: '.+does_not_exist" r"\.{}'"
).format(fn_ext)
msg2 = fr"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
msg3 = "Expected object or value"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
Expand Down Expand Up @@ -180,9 +178,7 @@ def test_read_expands_user_home_dir(
monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x))

msg1 = r"File (b')?.+does_not_exist\.{}'? does not exist".format(fn_ext)
msg2 = (
r"\[Errno 2\] No such file or directory:" r" '.+does_not_exist\.{}'"
).format(fn_ext)
msg2 = fr"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
msg3 = "Unexpected character found when decoding 'false'"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
Expand Down
12 changes: 3 additions & 9 deletions pandas/tests/reshape/merge/test_join.py
Original file line number Diff line number Diff line change
Expand Up @@ -226,9 +226,7 @@ def test_join_on_fails_with_different_right_index(self):
{"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)},
index=tm.makeCustomIndex(10, 2),
)
msg = (
r"len\(left_on\) must equal the number of levels in the index" ' of "right"'
)
msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
with pytest.raises(ValueError, match=msg):
merge(df, df2, left_on="a", right_index=True)

Expand All @@ -240,9 +238,7 @@ def test_join_on_fails_with_different_left_index(self):
df2 = DataFrame(
{"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)}
)
msg = (
r"len\(right_on\) must equal the number of levels in the index" ' of "left"'
)
msg = r'len\(right_on\) must equal the number of levels in the index of "left"'
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on="b", left_index=True)

Expand Down Expand Up @@ -737,9 +733,7 @@ def test_join_multi_to_multi(self, join_type):
)
tm.assert_frame_equal(expected, result)

msg = (
r"len\(left_on\) must equal the number of levels in the index" ' of "right"'
)
msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
with pytest.raises(ValueError, match=msg):
left.join(right, on="xy", how=join_type)

Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/reshape/merge/test_merge.py
Original file line number Diff line number Diff line change
Expand Up @@ -744,7 +744,7 @@ def test_overlapping_columns_error_message(self):

# #2649, #10639
df2.columns = ["key1", "foo", "foo"]
msg = r"Data columns not unique: Index\(\['foo', 'foo'\]," r" dtype='object'\)"
msg = r"Data columns not unique: Index\(\['foo', 'foo'\], dtype='object'\)"
with pytest.raises(MergeError, match=msg):
merge(df, df2)

Expand Down
4 changes: 2 additions & 2 deletions pandas/tests/series/test_constructors.py
Original file line number Diff line number Diff line change
Expand Up @@ -773,7 +773,7 @@ def test_constructor_dtype_datetime64(self):
dts.astype("int64")

# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to" r" \[int32\]"
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")

Expand Down Expand Up @@ -1198,7 +1198,7 @@ def test_constructor_dtype_timedelta64(self):
td.astype("int64")

# invalid casting
msg = r"cannot astype a timedelta from \[timedelta64\[ns\]\] to" r" \[int32\]"
msg = r"cannot astype a timedelta from \[timedelta64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
td.astype("int32")

Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/series/test_dtypes.py
Original file line number Diff line number Diff line change
Expand Up @@ -273,7 +273,7 @@ def test_astype_categorical_to_other(self):
expected = s
tm.assert_series_equal(s.astype("category"), expected)
tm.assert_series_equal(s.astype(CategoricalDtype()), expected)
msg = r"could not convert string to float|" r"invalid literal for float\(\)"
msg = r"could not convert string to float|invalid literal for float\(\)"
with pytest.raises(ValueError, match=msg):
s.astype("float64")

Expand Down
8 changes: 4 additions & 4 deletions pandas/tests/series/test_missing.py
Original file line number Diff line number Diff line change
Expand Up @@ -502,11 +502,11 @@ def test_fillna_int(self):

def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
msg = '"value" parameter must be a scalar or dict, but you passed a' ' "list"'
msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
with pytest.raises(TypeError, match=msg):
s.fillna([1, 2])

msg = '"value" parameter must be a scalar or dict, but you passed a' ' "tuple"'
msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
with pytest.raises(TypeError, match=msg):
s.fillna((1, 2))

Expand Down Expand Up @@ -593,11 +593,11 @@ def test_fillna_categorical_raise(self):
with pytest.raises(ValueError, match="fill value must be in categories"):
s.fillna({1: "d", 3: "a"})

msg = '"value" parameter must be a scalar or ' 'dict, but you passed a "list"'
msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
with pytest.raises(TypeError, match=msg):
s.fillna(["a", "b"])

msg = '"value" parameter must be a scalar or ' 'dict, but you passed a "tuple"'
msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
with pytest.raises(TypeError, match=msg):
s.fillna(("a", "b"))

Expand Down
4 changes: 1 addition & 3 deletions pandas/tests/tslibs/test_parse_iso8601.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,9 +59,7 @@ def test_parsers_iso8601_invalid(date_str):

def test_parsers_iso8601_invalid_offset_invalid():
date_str = "2001-01-01 12-34-56"
msg = "Timezone hours offset out of range " 'in datetime string "{s}"'.format(
s=date_str
)
msg = f'Timezone hours offset out of range in datetime string "{date_str}"'

with pytest.raises(ValueError, match=msg):
tslib._test_parse_iso8601(date_str)
Expand Down
4 changes: 1 addition & 3 deletions pandas/tests/util/test_validate_args_and_kwargs.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,9 +76,7 @@ def test_duplicate_argument():
kwargs = {"foo": None, "bar": None}
args = (None,) # duplicate value for "foo"

msg = r"{fname}\(\) got multiple values for keyword " r"argument '{arg}'".format(
fname=_fname, arg="foo"
)
msg = fr"{_fname}\(\) got multiple values for keyword argument 'foo'"

with pytest.raises(TypeError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
Expand Down
Loading