diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 1627b2f4d3ec3..17492f5d5cac9 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -19,7 +19,18 @@
 from .table_schema import build_table_schema, parse_table_schema
 from pandas.core.dtypes.common import is_period_dtype
 
-loads = json.loads
+
+def loads(*args, **kwargs):
+    try:
+        return json.loads(*args, **kwargs)
+    except ValueError as err:
+        # if the ValueError comes from a too-large value, return []
+        if err.args[0] == 'Value is too big':
+            return []
+        else:
+            # anything else, e.g. '{"key":b:a:d}', is re-raised
+            raise
+
 dumps = json.dumps
 
 TABLE_SCHEMA_VERSION = '0.20.0'
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 7e497c395266f..0e72e5d3abd84 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1222,3 +1222,10 @@ def test_index_false_error_to_json(self, orient):
                                      "valid when 'orient' is "
                                      "'split' or 'table'"):
             df.to_json(orient=orient, index=False)
+
+    @pytest.mark.parametrize('orient', [
+        'records', 'index', 'columns', 'values'
+    ])
+    def test_int_overflow(self, orient):
+        bar = json.dumps({'foo': 2**100000})
+        read_json(bar, orient=orient)
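
For orientation only, a minimal self-contained sketch of the fallback pattern the patch applies. The wrapped loads in the diff is pandas' C-level (ujson-based) parser, which the patch expects to raise ValueError('Value is too big') for integers outside its native range; the standard-library json module parses arbitrary-precision integers and would not raise, so a stub parser stands in for it here. Every name in the sketch is illustrative and not part of the patch.

    import json as stdlib_json

    def _ujson_like_loads(s, **kwargs):
        # Stand-in for the C-level parser: mimic the error message the patch
        # expects when an integer exceeds the parser's native integer range.
        raise ValueError('Value is too big')

    def loads(*args, **kwargs):
        # Same shape as the patched wrapper: swallow only the overflow error,
        # re-raise every other parse failure unchanged.
        try:
            return _ujson_like_loads(*args, **kwargs)
        except ValueError as err:
            if err.args and err.args[0] == 'Value is too big':
                return []
            raise

    doc = stdlib_json.dumps({'foo': 2 ** 100000})
    print(loads(doc))  # -> [] instead of an uncaught ValueError

The new test in the diff exercises the real path through read_json; the sketch only illustrates why the exact message, rather than the exception type alone, decides whether to fall back.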