STY: use pytest.raises context syntax #24655

Merged · 3 commits · Jan 8, 2019
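The change applied across these files is mechanical: call-style assertions such as pytest.raises(ValueError, func, arg) become a with pytest.raises(...) block, usually gaining a match= pattern so the error message is asserted as well. A minimal before/after sketch of the pattern (illustrative only, not taken from this diff; check_positive is a hypothetical function):

import pytest

def check_positive(x):
    # hypothetical helper that rejects non-positive input
    if x <= 0:
        raise ValueError("x must be positive")
    return x

def test_check_positive_raises():
    # old call style: only the exception type is asserted
    # pytest.raises(ValueError, check_positive, -1)

    # new context-manager style: the message is asserted too
    with pytest.raises(ValueError, match="must be positive"):
        check_positive(-1)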
13 changes: 9 additions & 4 deletions pandas/tests/io/formats/test_format.py
@@ -2720,7 +2720,12 @@ def test_format_percentiles():
expected = ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
assert result == expected

pytest.raises(ValueError, fmt.format_percentiles, [0.1, np.nan, 0.5])
pytest.raises(ValueError, fmt.format_percentiles, [-0.001, 0.1, 0.5])
pytest.raises(ValueError, fmt.format_percentiles, [2, 0.1, 0.5])
pytest.raises(ValueError, fmt.format_percentiles, [0.1, 0.5, 'a'])
msg = r"percentiles should all be in the interval \[0,1\]"
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([0.1, np.nan, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([-0.001, 0.1, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([2, 0.1, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([0.1, 0.5, 'a'])
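The match argument is applied as a regular expression via re.search, which is why the message above escapes the brackets in \[0,1\]. A small illustrative check of that behaviour (an assumed standalone test, not part of this diff):

import re

import pytest

def test_match_is_a_regex_search():
    # regex metacharacters such as [ and ] must be escaped to be
    # matched literally; match only needs to find the pattern
    # somewhere in the message, not match the whole message
    msg = r"interval \[0,1\]"
    with pytest.raises(ValueError, match=msg):
        raise ValueError("percentiles should all be in the interval [0,1]")
    assert re.search(msg, "percentiles should all be in the interval [0,1]")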
20 changes: 12 additions & 8 deletions pandas/tests/io/json/test_normalize.py
@@ -197,7 +197,9 @@ def test_meta_name_conflict(self):
'data': [{'foo': 'something', 'bar': 'else'},
{'foo': 'something2', 'bar': 'else2'}]}]

with pytest.raises(ValueError):
msg = (r"Conflicting metadata name (foo|bar),"
" need distinguishing prefix")
with pytest.raises(ValueError, match=msg):
json_normalize(data, 'data', meta=['foo', 'bar'])

result = json_normalize(data, 'data', meta=['foo', 'bar'],
@@ -366,13 +368,15 @@ def test_json_normalize_errors(self):

assert j.fillna('').to_dict() == expected

pytest.raises(KeyError,
json_normalize, data=i['Trades'],
record_path=[['general', 'stocks']],
meta=[['general', 'tradeid'],
['general', 'trade_version']],
errors='raise'
)
msg = ("Try running with errors='ignore' as key 'trade_version'"
" is not always present")
with pytest.raises(KeyError, match=msg):
json_normalize(
data=i['Trades'],
record_path=[['general', 'stocks']],
meta=[['general', 'tradeid'],
['general', 'trade_version']],
errors='raise')

def test_donot_drop_nonevalues(self):
# GH21356
85 changes: 56 additions & 29 deletions pandas/tests/io/json/test_pandas.py
@@ -101,8 +101,12 @@ def test_frame_non_unique_index(self):
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 1],
columns=['x', 'y'])

pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
msg = "DataFrame index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient='index')
msg = "DataFrame index must be unique for orient='columns'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient='columns')

assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
@@ -116,9 +120,15 @@ def test_frame_non_unique_columns(self):
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 2],
columns=['x', 'x'])

pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
pytest.raises(ValueError, df.to_json, orient='records')
msg = "DataFrame columns must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient='index')
msg = "DataFrame columns must be unique for orient='columns'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient='columns')
msg = "DataFrame columns must be unique for orient='records'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient='records')

assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split', dtype=False))
@@ -156,13 +166,16 @@ def _check_orient(df, orient, dtype=None, numpy=False,
# if we are not unique, then check that we are raising ValueError
# for the appropriate orients
if not df.index.is_unique and orient in ['index', 'columns']:
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
msg = ("DataFrame index must be unique for orient='{}'"
.format(orient))
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
return
if (not df.columns.is_unique and
orient in ['index', 'columns', 'records']):
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
# TODO: not executed. fix this.
with pytest.raises(ValueError, match='ksjkajksfjksjfkjs'):
df.to_json(orient=orient)
return

dfjson = df.to_json(orient=orient)
@@ -326,21 +339,24 @@ def _check_all_orients(df, dtype=None, convert_axes=True,
_check_orient(df.transpose().transpose(), "index", dtype=False)

def test_frame_from_json_bad_data(self):
pytest.raises(ValueError, read_json, StringIO('{"key":b:a:d}'))
with pytest.raises(ValueError, match='Expected object or value'):
read_json(StringIO('{"key":b:a:d}'))

# too few indices
json = StringIO('{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
pytest.raises(ValueError, read_json, json,
orient="split")
msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)"
with pytest.raises(ValueError, match=msg):
read_json(json, orient="split")

# too many columns
json = StringIO('{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
pytest.raises(AssertionError, read_json, json,
orient="split")
msg = "3 columns passed, passed data had 2 columns"
with pytest.raises(AssertionError, match=msg):
read_json(json, orient="split")

# bad key
json = StringIO('{"badkey":["A","B"],'
@@ -414,7 +430,9 @@ def test_frame_to_json_float_precision(self):

def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
pytest.raises(ValueError, df.to_json, orient="garbage")
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")

def test_frame_empty(self):
df = DataFrame(columns=['jim', 'joe'])
@@ -540,7 +558,8 @@ def __str__(self):

# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({'A': [binthing]})
with pytest.raises(OverflowError):
msg = "Unsupported UTF-8 sequence length when encoding string"
with pytest.raises(OverflowError, match=msg):
df_nonprintable.to_json()

# the same with multiple columns threw segfaults
@@ -565,7 +584,9 @@ def test_label_overflow(self):
def test_series_non_unique_index(self):
s = Series(['a', 'b'], index=[1, 1])

pytest.raises(ValueError, s.to_json, orient='index')
msg = "Series index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient='index')

assert_series_equal(s, read_json(s.to_json(orient='split'),
orient='split', typ='series'))
@@ -637,7 +658,9 @@ def _check_all_orients(series, dtype=None, check_index_type=True):

def test_series_to_json_except(self):
s = Series([1, 2, 3])
pytest.raises(ValueError, s.to_json, orient="garbage")
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")

def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
@@ -752,8 +775,9 @@ def test_w_date(date, date_unit=None):
test_w_date('20130101 20:43:42.123456', date_unit='us')
test_w_date('20130101 20:43:42.123456789', date_unit='ns')

pytest.raises(ValueError, df.to_json, date_format='iso',
date_unit='foo')
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format='iso', date_unit='foo')

def test_date_format_series(self):
def test_w_date(date, date_unit=None):
@@ -774,8 +798,9 @@ def test_w_date(date, date_unit=None):
test_w_date('20130101 20:43:42.123456789', date_unit='ns')

ts = Series(Timestamp('20130101 20:43:42.123'), index=self.ts.index)
pytest.raises(ValueError, ts.to_json, date_format='iso',
date_unit='foo')
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format='iso', date_unit='foo')

def test_date_unit(self):
df = self.tsframe.copy()
@@ -940,14 +965,16 @@ def test_default_handler_numpy_unsupported_dtype(self):
assert df.to_json(default_handler=str, orient="values") == expected

def test_default_handler_raises(self):
msg = "raisin"

def my_handler_raises(obj):
raise TypeError("raisin")
pytest.raises(TypeError,
DataFrame({'a': [1, 2, object()]}).to_json,
default_handler=my_handler_raises)
pytest.raises(TypeError,
DataFrame({'a': [1, 2, complex(4, -5)]}).to_json,
default_handler=my_handler_raises)
raise TypeError(msg)
with pytest.raises(TypeError, match=msg):
DataFrame({'a': [1, 2, object()]}).to_json(
default_handler=my_handler_raises)
with pytest.raises(TypeError, match=msg):
DataFrame({'a': [1, 2, complex(4, -5)]}).to_json(
default_handler=my_handler_raises)

def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
8 changes: 6 additions & 2 deletions pandas/tests/io/json/test_ujson.py
@@ -422,7 +422,9 @@ def test_datetime_units(self):
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value

pytest.raises(ValueError, ujson.encode, val, date_unit='foo')
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ujson.encode(val, date_unit='foo')

def test_encode_to_utf8(self):
unencoded = "\xe6\x97\xa5\xd1\x88"
@@ -695,7 +697,9 @@ def recursive_attr(self):
def __str__(self):
return str(self.val)

pytest.raises(OverflowError, ujson.encode, _TestObject("foo"))
msg = "Maximum recursion level reached"
with pytest.raises(OverflowError, match=msg):
ujson.encode(_TestObject("foo"))
assert '"foo"' == ujson.encode(_TestObject("foo"),
default_handler=str)

20 changes: 10 additions & 10 deletions pandas/tests/io/msgpack/test_except.py
@@ -22,16 +22,16 @@ def test_raise_from_object_hook(self):
def hook(_):
raise DummyException()

pytest.raises(DummyException, unpackb, packb({}), object_hook=hook)
pytest.raises(DummyException, unpackb, packb({'fizz': 'buzz'}),
object_hook=hook)
pytest.raises(DummyException, unpackb, packb({'fizz': 'buzz'}),
object_pairs_hook=hook)
pytest.raises(DummyException, unpackb,
packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)
pytest.raises(DummyException, unpackb,
packb({'fizz': {'buzz': 'spam'}}),
object_pairs_hook=hook)
with pytest.raises(DummyException):
unpackb(packb({}), object_hook=hook)
with pytest.raises(DummyException):
unpackb(packb({'fizz': 'buzz'}), object_hook=hook)
with pytest.raises(DummyException):
unpackb(packb({'fizz': 'buzz'}), object_pairs_hook=hook)
with pytest.raises(DummyException):
unpackb(packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)
with pytest.raises(DummyException):
unpackb(packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook)

def test_invalid_value(self):
msg = "Unpack failed: error"
16 changes: 10 additions & 6 deletions pandas/tests/io/msgpack/test_limits.py
@@ -12,22 +12,26 @@ class TestLimits(object):
def test_integer(self):
x = -(2 ** 63)
assert unpackb(packb(x)) == x
pytest.raises((OverflowError, ValueError), packb, x - 1)
msg = (r"((long |Python )?(int )?too (big|large) to convert"
r"( to C (unsigned )?long))?")
with pytest.raises((OverflowError, ValueError), match=msg):
packb(x - 1)
x = 2 ** 64 - 1
assert unpackb(packb(x)) == x
pytest.raises((OverflowError, ValueError), packb, x + 1)
with pytest.raises((OverflowError, ValueError), match=msg):
packb(x + 1)

def test_array_header(self):
packer = Packer()
packer.pack_array_header(2 ** 32 - 1)
pytest.raises((OverflowError, ValueError),
packer.pack_array_header, 2 ** 32)
with pytest.raises((OverflowError, ValueError)):
packer.pack_array_header(2 ** 32)

def test_map_header(self):
packer = Packer()
packer.pack_map_header(2 ** 32 - 1)
pytest.raises((OverflowError, ValueError),
packer.pack_array_header, 2 ** 32)
with pytest.raises((OverflowError, ValueError)):
packer.pack_array_header(2 ** 32)

def test_max_str_len(self):
d = 'x' * 3
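As test_integer above shows, pytest.raises also accepts a tuple of exception types, and match can use alternation when the exact wording varies across Python versions or platforms. A hedged sketch of the same idea (overflow_or_value is a hypothetical function):

import pytest

def overflow_or_value(flag):
    # hypothetical function whose failure mode varies
    if flag:
        raise OverflowError("int too big to convert")
    raise ValueError("int too large to convert")

def test_either_exception_type_matches():
    # a tuple means either exception type satisfies the assertion;
    # alternation in match covers both message variants
    with pytest.raises((OverflowError, ValueError), match="too (big|large)"):
        overflow_or_value(True)
    with pytest.raises((OverflowError, ValueError), match="too (big|large)"):
        overflow_or_value(False)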
19 changes: 8 additions & 11 deletions pandas/tests/io/msgpack/test_obj.py
@@ -47,31 +47,28 @@ def test_decode_pairs_hook(self):
assert unpacked[1] == prod_sum

def test_only_one_obj_hook(self):
pytest.raises(TypeError, unpackb, b'', object_hook=lambda x: x,
object_pairs_hook=lambda x: x)
msg = "object_pairs_hook and object_hook are mutually exclusive"
with pytest.raises(TypeError, match=msg):
unpackb(b'', object_hook=lambda x: x,
object_pairs_hook=lambda x: x)

def test_bad_hook(self):
def f():
msg = r"can't serialize \(1\+2j\)"
with pytest.raises(TypeError, match=msg):
packed = packb([3, 1 + 2j], default=lambda o: o)
unpacked = unpackb(packed, use_list=1) # noqa

pytest.raises(TypeError, f)

def test_array_hook(self):
packed = packb([1, 2, 3])
unpacked = unpackb(packed, list_hook=self._arr_to_str, use_list=1)
assert unpacked == '123'

def test_an_exception_in_objecthook1(self):
def f():
with pytest.raises(DecodeError, match='Ooops!'):
packed = packb({1: {'__complex__': True, 'real': 1, 'imag': 2}})
unpackb(packed, object_hook=self.bad_complex_decoder)

pytest.raises(DecodeError, f)

def test_an_exception_in_objecthook2(self):
def f():
with pytest.raises(DecodeError, match='Ooops!'):
packed = packb({1: [{'__complex__': True, 'real': 1, 'imag': 2}]})
unpackb(packed, list_hook=self.bad_complex_decoder, use_list=1)

pytest.raises(DecodeError, f)
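The wrapper-function pattern removed above (def f(): ... followed by pytest.raises(Exc, f)) folds directly into the context manager, since a with block can hold several statements. An illustrative sketch under assumed names (encode is a hypothetical stand-in):

import pytest

def encode(obj):
    # hypothetical serializer that rejects complex numbers
    raise TypeError("can't serialize {!r}".format(obj))

def test_multi_statement_body():
    # old style needed a wrapper function:
    #     def f():
    #         packed = encode(1 + 2j)
    #     pytest.raises(TypeError, f)
    # context-manager style: the statements live in the with block
    with pytest.raises(TypeError, match="can't serialize"):
        packed = encode(1 + 2j)  # noqa: F841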
17 changes: 12 additions & 5 deletions pandas/tests/io/msgpack/test_pack.py
@@ -67,12 +67,17 @@ def testIgnoreUnicodeErrors(self):
assert re == "abcdef"

def testStrictUnicodeUnpack(self):
pytest.raises(UnicodeDecodeError, unpackb, packb(b'abc\xeddef'),
encoding='utf-8', use_list=1)
msg = (r"'utf-*8' codec can't decode byte 0xed in position 3:"
" invalid continuation byte")
with pytest.raises(UnicodeDecodeError, match=msg):
unpackb(packb(b'abc\xeddef'), encoding='utf-8', use_list=1)

def testStrictUnicodePack(self):
pytest.raises(UnicodeEncodeError, packb, compat.u("abc\xeddef"),
encoding='ascii', unicode_errors='strict')
msg = (r"'ascii' codec can't encode character u*'\\xed' in position 3:"
r" ordinal not in range\(128\)")
with pytest.raises(UnicodeEncodeError, match=msg):
packb(compat.u("abc\xeddef"), encoding='ascii',
unicode_errors='strict')

def testIgnoreErrorsPack(self):
re = unpackb(
@@ -82,7 +87,9 @@ def testIgnoreErrorsPack(self):
assert re == compat.u("abcdef")

def testNoEncoding(self):
pytest.raises(TypeError, packb, compat.u("abc"), encoding=None)
msg = "Can't encode unicode string: no encoding is specified"
with pytest.raises(TypeError, match=msg):
packb(compat.u("abc"), encoding=None)

def testDecodeBinary(self):
re = unpackb(packb("abc"), encoding=None, use_list=1)