From 4fa5aa231e3769c3152d412dba45146e9bd74fe2 Mon Sep 17 00:00:00 2001 From: Jeremy Schendel Date: Thu, 21 Mar 2019 00:22:08 -0600 Subject: [PATCH 1/2] TST: Remove compat.PY2 and compat.PY3 flags from tests --- pandas/tests/arithmetic/test_numeric.py | 16 +- .../arrays/categorical/test_operators.py | 4 - pandas/tests/arrays/categorical/test_repr.py | 13 +- pandas/tests/arrays/test_numpy.py | 7 +- pandas/tests/computation/test_eval.py | 59 ++----- pandas/tests/dtypes/test_inference.py | 18 +- .../tests/extension/decimal/test_decimal.py | 23 +-- pandas/tests/extension/json/array.py | 2 +- pandas/tests/extension/json/test_json.py | 4 +- pandas/tests/extension/test_numpy.py | 11 -- pandas/tests/frame/test_alter_axes.py | 4 +- pandas/tests/frame/test_analytics.py | 19 +-- pandas/tests/frame/test_api.py | 3 +- .../tests/frame/test_axis_select_reindex.py | 3 +- pandas/tests/frame/test_constructors.py | 20 +-- pandas/tests/frame/test_indexing.py | 14 +- pandas/tests/frame/test_missing.py | 3 +- pandas/tests/frame/test_operators.py | 2 +- pandas/tests/frame/test_quantile.py | 3 - pandas/tests/frame/test_repr_info.py | 11 +- pandas/tests/frame/test_sorting.py | 3 +- pandas/tests/frame/test_timeseries.py | 3 +- pandas/tests/generic/test_generic.py | 6 +- pandas/tests/groupby/test_whitelist.py | 13 +- pandas/tests/indexes/common.py | 4 +- pandas/tests/indexes/datetimes/test_misc.py | 10 +- .../tests/indexes/datetimes/test_timezones.py | 3 +- pandas/tests/indexes/datetimes/test_tools.py | 5 +- pandas/tests/indexes/multi/test_analytics.py | 3 +- pandas/tests/indexes/multi/test_compat.py | 5 +- pandas/tests/indexes/multi/test_drop.py | 3 +- pandas/tests/indexes/multi/test_format.py | 37 +---- pandas/tests/indexes/multi/test_indexing.py | 3 +- .../tests/indexes/period/test_construction.py | 8 +- pandas/tests/indexes/test_base.py | 125 ++------------ pandas/tests/indexes/test_category.py | 156 ++++-------------- pandas/tests/indexes/test_numeric.py | 3 +- pandas/tests/indexes/test_range.py | 7 +- pandas/tests/indexing/test_coercion.py | 2 +- pandas/tests/indexing/test_floats.py | 5 +- pandas/tests/indexing/test_indexing.py | 4 +- pandas/tests/indexing/test_loc.py | 4 +- pandas/tests/io/formats/test_format.py | 121 +++++--------- pandas/tests/io/formats/test_to_csv.py | 28 +--- pandas/tests/io/formats/test_to_latex.py | 16 +- pandas/tests/io/json/test_normalize.py | 14 +- pandas/tests/io/json/test_pandas.py | 8 +- pandas/tests/io/json/test_ujson.py | 27 +-- pandas/tests/io/msgpack/common.py | 11 +- pandas/tests/io/parser/test_c_parser_only.py | 17 +- pandas/tests/io/parser/test_common.py | 39 ++--- .../io/parser/test_python_parser_only.py | 7 +- pandas/tests/io/parser/test_quoting.py | 7 +- pandas/tests/io/parser/test_read_fwf.py | 10 +- pandas/tests/io/sas/test_sas7bdat.py | 6 - pandas/tests/io/test_clipboard.py | 11 +- pandas/tests/io/test_gbq.py | 7 +- pandas/tests/io/test_html.py | 5 +- pandas/tests/io/test_packers.py | 6 +- pandas/tests/io/test_parquet.py | 8 +- pandas/tests/io/test_pickle.py | 13 +- pandas/tests/io/test_pytables.py | 31 +--- pandas/tests/io/test_sql.py | 3 +- pandas/tests/io/test_stata.py | 7 +- pandas/tests/plotting/test_datetimelike.py | 5 +- pandas/tests/plotting/test_frame.py | 13 +- pandas/tests/reductions/test_reductions.py | 3 +- pandas/tests/reshape/test_concat.py | 11 +- .../tests/scalar/timedelta/test_timedelta.py | 1 - .../scalar/timestamp/test_comparisons.py | 12 +- .../tests/scalar/timestamp/test_timestamp.py | 45 ++--- .../tests/scalar/timestamp/test_unary_ops.py | 27 
++- pandas/tests/series/test_analytics.py | 15 +- pandas/tests/series/test_arithmetic.py | 4 +- pandas/tests/series/test_datetime_values.py | 12 +- pandas/tests/series/test_dtypes.py | 6 - pandas/tests/series/test_missing.py | 3 +- pandas/tests/series/test_operators.py | 10 +- pandas/tests/series/test_rank.py | 3 +- pandas/tests/series/test_repr.py | 25 +-- pandas/tests/series/test_sorting.py | 3 - pandas/tests/series/test_timeseries.py | 3 +- pandas/tests/sparse/frame/test_frame.py | 3 +- pandas/tests/test_algos.py | 3 +- pandas/tests/test_base.py | 5 - pandas/tests/test_compat.py | 9 +- pandas/tests/test_config.py | 3 - pandas/tests/test_downstream.py | 4 +- pandas/tests/test_expressions.py | 37 ++--- pandas/tests/test_nanops.py | 2 - pandas/tests/test_sorting.py | 13 +- pandas/tests/test_strings.py | 75 +++------ pandas/tests/util/test_move.py | 35 ---- 93 files changed, 329 insertions(+), 1114 deletions(-) diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index da1b3f1da5322..d556101ac2ecb 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -9,7 +9,7 @@ import numpy as np import pytest -from pandas.compat import PY3, Iterable +from pandas.compat import Iterable import pandas as pd from pandas import Index, Series, Timedelta, TimedeltaIndex @@ -438,17 +438,12 @@ def test_div_equiv_binop(self): tm.assert_series_equal(result, expected) def test_div_int(self, numeric_idx): - # truediv under PY3 idx = numeric_idx result = idx / 1 - expected = idx - if PY3: - expected = expected.astype('float64') + expected = idx.astype('float64') tm.assert_index_equal(result, expected) result = idx / 2 - if PY3: - expected = expected.astype('float64') expected = Index(idx.values / 2) tm.assert_index_equal(result, expected) @@ -1013,13 +1008,8 @@ def test_numeric_compat2(self): expected = pd.RangeIndex(-2, 8, 2) tm.assert_index_equal(result, expected, exact=True) - # truediv under PY3 result = idx / 2 - - if PY3: - expected = pd.RangeIndex(0, 5, 1).astype('float64') - else: - expected = pd.RangeIndex(0, 5, 1) + expected = pd.RangeIndex(0, 5, 1).astype('float64') tm.assert_index_equal(result, expected, exact=True) result = idx / 4 diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py index e1264722aedcd..c7a4e0a5fe380 100644 --- a/pandas/tests/arrays/categorical/test_operators.py +++ b/pandas/tests/arrays/categorical/test_operators.py @@ -4,8 +4,6 @@ import numpy as np import pytest -from pandas.compat import PY2 - import pandas as pd from pandas import Categorical, DataFrame, Series, date_range from pandas.tests.arrays.categorical.common import TestCategorical @@ -19,7 +17,6 @@ def test_categories_none_comparisons(self): 'a', 'c', 'c', 'c'], ordered=True) tm.assert_categorical_equal(factor, self.factor) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_comparisons(self): result = self.factor[self.factor == 'a'] @@ -190,7 +187,6 @@ def test_comparison_with_unknown_scalars(self): tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True])) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") @pytest.mark.parametrize('data,reverse,base', [ (list("abc"), list("cba"), list("bbb")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])] diff --git a/pandas/tests/arrays/categorical/test_repr.py b/pandas/tests/arrays/categorical/test_repr.py index 08b32a216ffb6..05d1a0b2a8b8b 100644 --- a/pandas/tests/arrays/categorical/test_repr.py 
+++ b/pandas/tests/arrays/categorical/test_repr.py @@ -2,7 +2,7 @@ import numpy as np -from pandas.compat import PY3, u +from pandas.compat import u from pandas import ( Categorical, CategoricalIndex, Series, date_range, period_range, @@ -60,18 +60,13 @@ def test_print_none_width(self): assert exp == repr(a) def test_unicode_print(self): - if PY3: - _rep = repr - else: - _rep = unicode # noqa - c = Categorical(['aaaaa', 'bb', 'cccc'] * 20) expected = u"""\ [aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc] Length: 60 Categories (3, object): [aaaaa, bb, cccc]""" - assert _rep(c) == expected + assert repr(c) == expected c = Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20) expected = u"""\ @@ -79,7 +74,7 @@ def test_unicode_print(self): Length: 60 Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa - assert _rep(c) == expected + assert repr(c) == expected # unicode option should not affect to Categorical, as it doesn't care # the repr width @@ -90,7 +85,7 @@ def test_unicode_print(self): Length: 60 Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa - assert _rep(c) == expected + assert repr(c) == expected def test_categorical_repr(self): c = Categorical([1, 2, 3]) diff --git a/pandas/tests/arrays/test_numpy.py b/pandas/tests/arrays/test_numpy.py index a77f1f8a7b3d1..9cf26dce15d0a 100644 --- a/pandas/tests/arrays/test_numpy.py +++ b/pandas/tests/arrays/test_numpy.py @@ -8,7 +8,6 @@ import pandas.util._test_decorators as td import pandas as pd -from pandas import compat from pandas.arrays import PandasArray from pandas.core.arrays.numpy_ import PandasDtype import pandas.util.testing as tm @@ -42,8 +41,7 @@ def any_numpy_array(request): ('float', True), ('complex', True), ('str', False), - pytest.param('bytes', False, - marks=pytest.mark.skipif(compat.PY2, reason="PY2")), + ('bytes', False), ('datetime64[ns]', False), ('object', False), ('void', False), @@ -60,8 +58,7 @@ def test_is_numeric(dtype, expected): ('float', False), ('complex', False), ('str', False), - pytest.param('bytes', False, - marks=pytest.mark.skipif(compat.PY2, reason="PY2")), + ('bytes', False), ('datetime64[ns]', False), ('object', False), ('void', False) diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 49d263feab664..62905ddd3d398 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -7,7 +7,7 @@ from numpy.random import rand, randint, randn import pytest -from pandas.compat import PY3, reduce +from pandas.compat import reduce from pandas.errors import PerformanceWarning import pandas.util._test_decorators as td @@ -102,7 +102,7 @@ def _bool_and_frame(lhs, rhs): def _is_py3_complex_incompat(result, expected): - return (PY3 and isinstance(expected, (complex, np.complexfloating)) and + return (isinstance(expected, (complex, np.complexfloating)) and np.isnan(result)) @@ -1133,50 +1133,27 @@ def test_truediv(self): ex = 's / 1' d = {'s': s} # noqa - if PY3: - res = self.eval(ex, truediv=False) - tm.assert_numpy_array_equal(res, np.array([1.0])) + res = self.eval(ex, truediv=False) + tm.assert_numpy_array_equal(res, np.array([1.0])) - res = self.eval(ex, truediv=True) - tm.assert_numpy_array_equal(res, np.array([1.0])) + res = self.eval(ex, truediv=True) + tm.assert_numpy_array_equal(res, np.array([1.0])) - res = self.eval('1 / 2', truediv=True) - expec = 0.5 - assert res == expec + res = self.eval('1 / 2', truediv=True) + expec = 0.5 + assert res == expec - res = self.eval('1 / 2', truediv=False) - expec = 0.5 - 
assert res == expec + res = self.eval('1 / 2', truediv=False) + expec = 0.5 + assert res == expec - res = self.eval('s / 2', truediv=False) - expec = 0.5 - assert res == expec + res = self.eval('s / 2', truediv=False) + expec = 0.5 + assert res == expec - res = self.eval('s / 2', truediv=True) - expec = 0.5 - assert res == expec - else: - res = self.eval(ex, truediv=False) - tm.assert_numpy_array_equal(res, np.array([1])) - - res = self.eval(ex, truediv=True) - tm.assert_numpy_array_equal(res, np.array([1.0])) - - res = self.eval('1 / 2', truediv=True) - expec = 0.5 - assert res == expec - - res = self.eval('1 / 2', truediv=False) - expec = 0 - assert res == expec - - res = self.eval('s / 2', truediv=False) - expec = 0 - assert res == expec - - res = self.eval('s / 2', truediv=True) - expec = 0.5 - assert res == expec + res = self.eval('s / 2', truediv=True) + expec = 0.5 + assert res == expec def test_failing_subscript_with_name_error(self): df = DataFrame(np.random.randn(5, 3)) # noqa diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 467f0a8eb0fc4..41fa273586914 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -17,7 +17,7 @@ import pytz from pandas._libs import iNaT, lib, missing as libmissing -from pandas.compat import PY2, StringIO, lrange, u +from pandas.compat import StringIO, lrange, u import pandas.util._test_decorators as td from pandas.core.dtypes import inference @@ -286,18 +286,6 @@ def __hash__(self): # is_hashable() assert not inference.is_hashable(np.array([])) - # old-style classes in Python 2 don't appear hashable to - # collections.Hashable but also seem to support hash() by default - if PY2: - - class OldStyleClass(): - pass - - c = OldStyleClass() - assert not isinstance(c, compat.Hashable) - assert inference.is_hashable(c) - hash(c) # this will not raise - @pytest.mark.parametrize( "ll", [re.compile('ad')]) @@ -330,7 +318,7 @@ def test_is_recompilable_fails(ll): class TestInference(object): def test_infer_dtype_bytes(self): - compare = 'string' if PY2 else 'bytes' + compare = 'bytes' # string array of bytes arr = np.array(list('abc'), dtype='S1') @@ -658,7 +646,7 @@ def test_unicode(self): arr = [u'a', np.nan, u'c'] result = lib.infer_dtype(arr, skipna=True) - expected = 'unicode' if PY2 else 'string' + expected = 'string' assert result == expected @pytest.mark.parametrize('dtype, missing, skipna, expected', [ diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 686bd898f5171..1fa874e7341ca 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -6,7 +6,6 @@ import pytest import pandas as pd -from pandas import compat from pandas.tests.extension import base import pandas.util.testing as tm @@ -114,15 +113,12 @@ def assert_frame_equal(self, left, right, *args, **kwargs): class TestDtype(BaseDecimal, base.BaseDtypeTests): - @pytest.mark.skipif(compat.PY2, reason="Context not hashable.") def test_hashable(self, dtype): pass class TestInterface(BaseDecimal, base.BaseInterfaceTests): - - pytestmark = pytest.mark.skipif(compat.PY2, - reason="Unhashble dtype in Py2.") + pass class TestConstructors(BaseDecimal, base.BaseConstructorsTests): @@ -134,8 +130,7 @@ def test_from_dtype(self, data): class TestReshaping(BaseDecimal, base.BaseReshapingTests): - pytestmark = pytest.mark.skipif(compat.PY2, - reason="Unhashble dtype in Py2.") + pass class 
TestGetitem(BaseDecimal, base.BaseGetitemTests): @@ -193,13 +188,11 @@ def test_value_counts(self, all_data, dropna): class TestCasting(BaseDecimal, base.BaseCastingTests): - pytestmark = pytest.mark.skipif(compat.PY2, - reason="Unhashble dtype in Py2.") + pass class TestGroupby(BaseDecimal, base.BaseGroupbyTests): - pytestmark = pytest.mark.skipif(compat.PY2, - reason="Unhashble dtype in Py2.") + pass class TestSetitem(BaseDecimal, base.BaseSetitemTests): @@ -207,8 +200,7 @@ class TestSetitem(BaseDecimal, base.BaseSetitemTests): class TestPrinting(BaseDecimal, base.BasePrintingTests): - pytestmark = pytest.mark.skipif(compat.PY2, - reason="Unhashble dtype in Py2.") + pass # TODO(extension) @@ -398,9 +390,6 @@ def _formatting_values(self): return np.array(self) ser = pd.Series(DecimalArray2([decimal.Decimal('1.0')])) - # different levels for 2 vs. 3 - check_stacklevel = compat.PY3 - with tm.assert_produces_warning(DeprecationWarning, - check_stacklevel=check_stacklevel): + with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=True): repr(ser) diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index 10fd21f89c564..3e328f545d52f 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -5,7 +5,7 @@ Note: -We currently store lists of UserDicts (Py3 only). Pandas has a few places +We currently store lists of UserDicts. Pandas has a few places internally that specifically check for dicts, and does non-scalar things in that case. We *want* the dictionaries to be treated as scalars, so we hack around pandas by using UserDicts. diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py index 8c7e99b7d0cc5..5b001325216bf 100644 --- a/pandas/tests/extension/json/test_json.py +++ b/pandas/tests/extension/json/test_json.py @@ -3,7 +3,7 @@ import pytest -from pandas.compat import PY2, PY36 +from pandas.compat import PY36 import pandas as pd from pandas.tests.extension import base @@ -11,8 +11,6 @@ from .array import JSONArray, JSONDtype, make_data -pytestmark = pytest.mark.skipif(PY2, reason="Py2 doesn't have a UserDict") - @pytest.fixture def dtype(): diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index 41f5beb8c885d..84e20232c4116 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -4,7 +4,6 @@ from pandas.compat.numpy import _np_version_under1p16 import pandas as pd -from pandas import compat from pandas.core.arrays.numpy_ import PandasArray, PandasDtype import pandas.util.testing as tm @@ -276,21 +275,11 @@ def test_error(self, data, all_arithmetic_operators): pass def test_arith_series_with_scalar(self, data, all_arithmetic_operators): - if (compat.PY2 and - all_arithmetic_operators in {'__div__', '__rdiv__'}): - raise pytest.skip( - "Matching NumPy int / int -> float behavior." - ) super(TestArithmetics, self).test_arith_series_with_scalar( data, all_arithmetic_operators ) def test_arith_series_with_array(self, data, all_arithmetic_operators): - if (compat.PY2 and - all_arithmetic_operators in {'__div__', '__rdiv__'}): - raise pytest.skip( - "Matching NumPy int / int -> float behavior." 
- ) super(TestArithmetics, self).test_arith_series_with_array( data, all_arithmetic_operators ) diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index ade77c8dadeda..3dc9c2face9ed 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -8,7 +8,7 @@ import numpy as np import pytest -from pandas.compat import PY2, lrange +from pandas.compat import lrange from pandas.core.dtypes.common import ( is_categorical_dtype, is_interval_dtype, is_object_dtype) @@ -1342,14 +1342,12 @@ def test_ambiguous_warns(self): with tm.assert_produces_warning(FutureWarning): df.rename({0: 10}, {"A": "B"}) - @pytest.mark.skipif(PY2, reason="inspect.signature") def test_rename_signature(self): sig = inspect.signature(DataFrame.rename) parameters = set(sig.parameters) assert parameters == {"self", "mapper", "index", "columns", "axis", "inplace", "copy", "level", "errors"} - @pytest.mark.skipif(PY2, reason="inspect.signature") def test_reindex_signature(self): sig = inspect.signature(DataFrame.reindex) parameters = set(sig.parameters) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 88c8d89ec4b63..e405a21ca71db 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -8,13 +8,13 @@ import numpy as np import pytest -from pandas.compat import PY2, PY35, is_platform_windows, lrange +from pandas.compat import lrange import pandas.util._test_decorators as td import pandas as pd from pandas import ( - Categorical, DataFrame, MultiIndex, Series, Timestamp, compat, date_range, - isna, notna, to_datetime, to_timedelta) + Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna, + notna, to_datetime, to_timedelta) import pandas.core.algorithms as algorithms import pandas.core.nanops as nanops import pandas.util.testing as tm @@ -898,7 +898,6 @@ def test_var_std(self, datetime_frame): result = nanops.nanvar(arr, axis=0) assert not (result < 0).any() - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") @pytest.mark.parametrize( "meth", ['sem', 'var', 'std']) def test_numeric_only_flag(self, meth): @@ -1010,7 +1009,6 @@ def test_mode_dropna(self, dropna, expected): expected = DataFrame(expected) tm.assert_frame_equal(result, expected) - @pytest.mark.skipif(not compat.PY3, reason="only PY3") def test_mode_sortwarning(self): # Check for the warning that is raised when the mode # results cannot be sorted @@ -1372,7 +1370,6 @@ def test_pct_change(self): # ---------------------------------------------------------------------- # Index of max / min - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_idxmin(self, float_frame, int_frame): frame = float_frame frame.loc[5:10] = np.nan @@ -1390,7 +1387,6 @@ def test_idxmin(self, float_frame, int_frame): with pytest.raises(ValueError, match=msg): frame.idxmin(axis=2) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_idxmax(self, float_frame, int_frame): frame = float_frame frame.loc[5:10] = np.nan @@ -1855,9 +1851,6 @@ def test_numpy_round(self): with pytest.raises(ValueError, match=msg): np.round(df, decimals=0, out=df) - @pytest.mark.xfail( - PY2 and is_platform_windows(), reason="numpy/numpy#7882", - raises=AssertionError, strict=True) def test_numpy_round_nan(self): # See gh-14197 df = Series([1.53, np.nan, 0.06]).to_frame() @@ -1897,10 +1890,6 @@ def test_round_issue(self): df.round(decimals) def test_built_in_round(self): - if not 
compat.PY3: - pytest.skip("build in round cannot be overridden " - "prior to Python 3") - # GH 11763 # Here's the test frame we'll be working with df = DataFrame( @@ -2157,8 +2146,6 @@ def test_dot(self): with pytest.raises(ValueError, match='aligned'): df.dot(df2) - @pytest.mark.skipif(not PY35, - reason='matmul supported for Python>=3.5') def test_matmul(self): # matmul test is for GH 10259 a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'], diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index badfa0ca8fd15..5a5335281d41d 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -9,7 +9,7 @@ import numpy as np import pytest -from pandas.compat import PY2, long, lrange, range +from pandas.compat import long, lrange, range import pandas as pd from pandas import ( @@ -360,7 +360,6 @@ def test_transpose(self, float_frame): for col, s in compat.iteritems(mixed_T): assert s.dtype == np.object_ - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_swapaxes(self): df = self.klass(np.random.randn(10, 5)) self._assert_frame_equal(df.T, df.swapaxes(0, 1)) diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index cf8c55f00b061..3e45f23ac70d2 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -7,7 +7,7 @@ import numpy as np import pytest -from pandas.compat import PY2, lrange, lzip, u +from pandas.compat import lrange, lzip, u from pandas.errors import PerformanceWarning import pandas as pd @@ -1051,7 +1051,6 @@ def test_reindex_corner(self): smaller = self.intframe.reindex(columns=['A', 'B', 'E']) assert smaller['E'].dtype == np.float64 - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_reindex_axis(self): cols = ['A', 'B', 'E'] with tm.assert_produces_warning(FutureWarning) as m: diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 1d5cbfec8de52..ab064ed98e31f 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -12,8 +12,7 @@ import pytest from pandas.compat import ( - PY2, PY3, PY36, is_platform_little_endian, lmap, long, lrange, lzip, range, - zip) + PY36, is_platform_little_endian, lmap, long, lrange, lzip, range, zip) from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import is_integer_dtype @@ -21,7 +20,7 @@ import pandas as pd from pandas import ( Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp, - _np_version_under1p13, compat, date_range, isna) + compat, date_range, isna) from pandas.tests.frame.common import TestData import pandas.util.testing as tm @@ -163,9 +162,7 @@ def test_constructor_dtype_str_na_values(self, string_dtype): def test_constructor_rec(self): rec = self.frame.to_records(index=False) - if PY3: - # unicode error under PY2 - rec.dtype.names = list(rec.dtype.names)[::-1] + rec.dtype.names = list(rec.dtype.names)[::-1] index = self.frame.index @@ -684,8 +681,6 @@ def test_constructor_ndarray(self): frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A']) assert len(frame) == 2 - @pytest.mark.skipif(PY2 and _np_version_under1p13, - reason="old numpy & py2") def test_constructor_maskedarray(self): self._check_basic_constructor(ma.masked_all) @@ -702,8 +697,6 @@ def test_constructor_maskedarray(self): frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2]) 
assert np.all(~np.asarray(frame == frame)) - @pytest.mark.skipif(PY2 and _np_version_under1p13, - reason="old numpy & py2") def test_constructor_maskedarray_nonfloat(self): # masked int promoted to float mat = ma.masked_all((2, 3), dtype=int) @@ -771,8 +764,6 @@ def test_constructor_maskedarray_nonfloat(self): assert frame['A'][1] is True assert frame['C'][2] is False - @pytest.mark.skipif(PY2 and _np_version_under1p13, - reason="old numpy & py2") def test_constructor_maskedarray_hardened(self): # Check numpy masked arrays with hard masks -- from GH24574 mat_hard = ma.masked_all((2, 2), dtype=float).harden_mask() @@ -795,8 +786,6 @@ def test_constructor_maskedarray_hardened(self): dtype=float) tm.assert_frame_equal(result, expected) - @pytest.mark.skipif(PY2 and _np_version_under1p13, - reason="old numpy & py2") def test_constructor_maskedrecarray_dtype(self): # Ensure constructor honors dtype data = np.ma.array( @@ -808,8 +797,6 @@ def test_constructor_maskedrecarray_dtype(self): columns=['date', 'price']) tm.assert_frame_equal(result, expected) - @pytest.mark.skipif(PY2 and _np_version_under1p13, - reason="old numpy & py2") def test_constructor_mrecarray(self): # Ensure mrecarray produces frame identical to dict of masked arrays # from GH3479 @@ -1711,7 +1698,6 @@ def test_constructor_series_copy(self): assert not (series['A'] == 5).all() - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_constructor_with_nas(self): # GH 5016 # na's in indices diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index ffe54f7a94307..07b6a28b90619 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -9,7 +9,7 @@ import pytest from pandas._libs.tslib import iNaT -from pandas.compat import PY2, long, lrange, lzip, map, range, zip +from pandas.compat import long, lrange, lzip, map, range, zip from pandas.core.dtypes.common import is_float_dtype, is_integer, is_scalar from pandas.core.dtypes.dtypes import CategoricalDtype @@ -270,7 +270,7 @@ def test_getitem_boolean(self): # test df[df > 0] for df in [self.tsframe, self.mixed_frame, self.mixed_float, self.mixed_int]: - if compat.PY3 and df is self.mixed_frame: + if df is self.mixed_frame: continue data = df._get_numeric_data() @@ -869,7 +869,6 @@ def test_getitem_fancy_slice_integers_step(self): df.iloc[:8:2] = np.nan assert isna(df.iloc[:8:2]).values.all() - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_getitem_setitem_integer_slice_keyerrors(self): df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2)) @@ -1084,7 +1083,6 @@ def test_fancy_getitem_int_labels(self): expected = df[3] assert_series_equal(result, expected) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_fancy_index_int_labels_exceptions(self): df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2)) @@ -1516,7 +1514,6 @@ def test_getitem_setitem_boolean_multi(self): expected.loc[[0, 2], [1]] = 5 assert_frame_equal(df, expected) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_getitem_setitem_float_labels(self): index = Index([1.5, 2, 3, 4, 5]) df = DataFrame(np.random.randn(5, 5), index=index) @@ -1835,7 +1832,6 @@ def test_set_value(self): self.frame.set_value(idx, col, 1) assert self.frame[col][idx] == 1 - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_set_value_resize(self): with tm.assert_produces_warning(FutureWarning, @@ -2627,7 +2623,7 @@ def 
_check_get(df, cond, check_dtypes=True): # check getting for df in [default_frame, self.mixed_frame, self.mixed_float, self.mixed_int]: - if compat.PY3 and df is self.mixed_frame: + if df is self.mixed_frame: with pytest.raises(TypeError): df > 0 continue @@ -2678,7 +2674,7 @@ def _check_align(df, cond, other, check_dtypes=True): assert (rs.dtypes == df.dtypes).all() for df in [self.mixed_frame, self.mixed_float, self.mixed_int]: - if compat.PY3 and df is self.mixed_frame: + if df is self.mixed_frame: with pytest.raises(TypeError): df > 0 continue @@ -2733,7 +2729,7 @@ def _check_set(df, cond, check_dtypes=True): for df in [default_frame, self.mixed_frame, self.mixed_float, self.mixed_int]: - if compat.PY3 and df is self.mixed_frame: + if df is self.mixed_frame: with pytest.raises(TypeError): df > 0 continue diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 94be24710362a..1ac0d2e5c82d3 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -9,7 +9,7 @@ import numpy as np import pytest -from pandas.compat import PY2, lrange +from pandas.compat import lrange import pandas.util._test_decorators as td import pandas as pd @@ -83,7 +83,6 @@ def test_dropIncompleteRows(self, float_frame): tm.assert_index_equal(samesize_frame.index, float_frame.index) tm.assert_index_equal(inp_frame2.index, float_frame.index) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_dropna(self): df = DataFrame(np.random.randn(6, 4)) df[2][:2] = np.nan diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index 9707ae80e6812..4f671b9dc398a 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -726,7 +726,7 @@ def test_inplace_ops_identity(self): 'xor']) def test_inplace_ops_identity2(self, op): - if compat.PY3 and op == 'div': + if op == 'div': return df = DataFrame({'a': [1., 2., 3.], diff --git a/pandas/tests/frame/test_quantile.py b/pandas/tests/frame/test_quantile.py index facbfdd0c032b..5d81f1c8addf5 100644 --- a/pandas/tests/frame/test_quantile.py +++ b/pandas/tests/frame/test_quantile.py @@ -5,8 +5,6 @@ import numpy as np import pytest -from pandas.compat import PY2 - import pandas as pd from pandas import DataFrame, Series, Timestamp from pandas.tests.frame.common import TestData @@ -73,7 +71,6 @@ def test_quantile_axis_mixed(self): with pytest.raises(TypeError): df.quantile(.5, axis=1, numeric_only=False) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_quantile_axis_parameter(self): # GH 9543/9544 diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 4a7cb7f508926..d0125c0a94361 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -142,18 +142,11 @@ def test_repr_unicode(self): def test_unicode_string_with_unicode(self): df = DataFrame({'A': [u("\u05d0")]}) - - if compat.PY3: - str(df) - else: - compat.text_type(df) + str(df) def test_bytestring_with_unicode(self): df = DataFrame({'A': [u("\u05d0")]}) - if compat.PY3: - bytes(df) - else: - str(df) + bytes(df) def test_very_wide_info_repr(self): df = DataFrame(np.random.randn(10, 20), diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py index baf50982d8ab0..7d623053334d6 100644 --- a/pandas/tests/frame/test_sorting.py +++ b/pandas/tests/frame/test_sorting.py @@ -7,7 +7,7 @@ import numpy as np import pytest -from pandas.compat import PY2, 
lrange +from pandas.compat import lrange import pandas as pd from pandas import ( @@ -21,7 +21,6 @@ class TestDataFrameSorting(TestData): - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_sort_values(self): frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list('ABC')) diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 9965be9091451..d303b5272dd1b 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -8,7 +8,7 @@ import pytest import pytz -from pandas.compat import PY2, product +from pandas.compat import product import pandas as pd from pandas import ( @@ -836,7 +836,6 @@ def test_datetime_assignment_with_NaT_and_diff_time_units(self): 'new': [1e9, None]}, dtype='datetime64[ns]') tm.assert_frame_equal(result, expected) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_frame_to_period(self): K = 5 diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index c40544d6e3f86..65608d95ba770 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -6,7 +6,7 @@ import numpy as np import pytest -from pandas.compat import PY2, PY3, range, zip +from pandas.compat import range, zip from pandas.core.dtypes.common import is_scalar @@ -482,8 +482,7 @@ def test_api_compat(self): for func in ['sum', 'cumsum', 'any', 'var']: f = getattr(obj, func) assert f.__name__ == func - if PY3: - assert f.__qualname__.endswith(func) + assert f.__qualname__.endswith(func) def test_stat_non_defaults_args(self): obj = self._construct(5) @@ -693,7 +692,6 @@ def test_sample(sel): with pytest.raises(ValueError): df.sample(1, weights=s4) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_squeeze(self): # noop for s in [tm.makeFloatSeries(), tm.makeStringSeries(), diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py index b7302b3911e58..2bd2f3fb00b56 100644 --- a/pandas/tests/groupby/test_whitelist.py +++ b/pandas/tests/groupby/test_whitelist.py @@ -8,7 +8,7 @@ import numpy as np import pytest -from pandas import DataFrame, Index, MultiIndex, Series, compat, date_range +from pandas import DataFrame, Index, MultiIndex, Series, date_range from pandas.util import testing as tm AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew', @@ -126,12 +126,11 @@ def check_whitelist(obj, df, m): assert n == m # qualname - if compat.PY3: - try: - n = f.__qualname__ - except AttributeError: - return - assert n.endswith(m) + try: + n = f.__qualname__ + except AttributeError: + return + assert n.endswith(m) def test_groupby_series_whitelist(df_letters, s_whitelist_fixture): diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 6d29c147c4a4a..1c0ccfc0b59dd 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -5,7 +5,6 @@ from pandas._libs.tslib import iNaT import pandas.compat as compat -from pandas.compat import PY3 from pandas.core.dtypes.dtypes import CategoricalDtype @@ -133,8 +132,7 @@ def test_numeric_compat(self): with pytest.raises(TypeError, match="cannot perform __rmul__"): 1 * idx - div_err = ("cannot perform __truediv__" if PY3 - else "cannot perform __div__") + div_err = "cannot perform __truediv__" with pytest.raises(TypeError, match=div_err): idx / 1 diff --git a/pandas/tests/indexes/datetimes/test_misc.py 
b/pandas/tests/indexes/datetimes/test_misc.py index fc6080e68a803..1ea5bc17978ed 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -289,9 +289,8 @@ def test_datetime_name_accessors(self, time_locale): # work around different normalization schemes # https://github.com/pandas-dev/pandas/issues/22342 - if not compat.PY2: - result = result.str.normalize("NFD") - expected = expected.str.normalize("NFD") + result = result.str.normalize("NFD") + expected = expected.str.normalize("NFD") tm.assert_index_equal(result, expected) @@ -299,9 +298,8 @@ def test_datetime_name_accessors(self, time_locale): result = date.month_name(locale=time_locale) expected = expected.capitalize() - if not compat.PY2: - result = unicodedata.normalize("NFD", result) - expected = unicodedata.normalize("NFD", result) + result = unicodedata.normalize("NFD", result) + expected = unicodedata.normalize("NFD", result) assert result == expected dti = dti.append(DatetimeIndex([pd.NaT])) diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index b25918417efcd..3dd836cf5b2d8 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -12,7 +12,7 @@ import pytz from pandas._libs.tslibs import conversion, timezones -from pandas.compat import PY3, lrange, zip +from pandas.compat import lrange, zip import pandas.util._test_decorators as td import pandas as pd @@ -1084,7 +1084,6 @@ def test_dti_union_aware(self): @pytest.mark.parametrize('tz', [None, 'UTC', "US/Central", dateutil.tz.tzoffset(None, -28800)]) @pytest.mark.usefixtures("datetime_tz_utc") - @pytest.mark.skipif(not PY3, reason="datetime.timezone not in PY2") def test_iteration_preserves_nanoseconds(self, tz): # GH 19603 index = DatetimeIndex(["2018-02-08 15:00:00.168456358", diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 22e589beb8ba1..6c41119b12579 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -14,7 +14,7 @@ from pandas._libs import tslib from pandas._libs.tslibs import iNaT, parsing -from pandas.compat import PY3, lmap +from pandas.compat import lmap from pandas.errors import OutOfBoundsDatetime import pandas.util._test_decorators as td @@ -386,9 +386,6 @@ def test_to_datetime_today(self): def test_to_datetime_today_now_unicode_bytes(self): to_datetime([u'now']) to_datetime([u'today']) - if not PY3: - to_datetime(['now']) - to_datetime(['today']) @pytest.mark.parametrize('cache', [True, False]) def test_to_datetime_dt64s(self, cache): diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py index d5a6e9acaa5f3..7b8165ce295ec 100644 --- a/pandas/tests/indexes/multi/test_analytics.py +++ b/pandas/tests/indexes/multi/test_analytics.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from pandas.compat import PY2, lrange +from pandas.compat import lrange from pandas.compat.numpy import _np_version_under1p17 import pandas as pd @@ -275,7 +275,6 @@ def test_map_dictlike(idx, mapper): tm.assert_index_equal(result, expected) -@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") @pytest.mark.parametrize('func', [ np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10, np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin, diff --git a/pandas/tests/indexes/multi/test_compat.py 
b/pandas/tests/indexes/multi/test_compat.py index 89685b9feec27..a014718d62d85 100644 --- a/pandas/tests/indexes/multi/test_compat.py +++ b/pandas/tests/indexes/multi/test_compat.py @@ -4,7 +4,7 @@ import numpy as np import pytest -from pandas.compat import PY3, long +from pandas.compat import long from pandas import MultiIndex import pandas.util.testing as tm @@ -17,8 +17,7 @@ def test_numeric_compat(idx): with pytest.raises(TypeError, match="cannot perform __rmul__"): 1 * idx - div_err = ("cannot perform __truediv__" if PY3 - else "cannot perform __div__") + div_err = "cannot perform __truediv__" with pytest.raises(TypeError, match=div_err): idx / 1 diff --git a/pandas/tests/indexes/multi/test_drop.py b/pandas/tests/indexes/multi/test_drop.py index ac167c126fd13..5a4594dce8d66 100644 --- a/pandas/tests/indexes/multi/test_drop.py +++ b/pandas/tests/indexes/multi/test_drop.py @@ -4,7 +4,7 @@ import numpy as np import pytest -from pandas.compat import PY2, lrange +from pandas.compat import lrange from pandas.errors import PerformanceWarning import pandas as pd @@ -12,7 +12,6 @@ import pandas.util.testing as tm -@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_drop(idx): dropped = idx.drop([('foo', 'two'), ('qux', 'one')]) diff --git a/pandas/tests/indexes/multi/test_format.py b/pandas/tests/indexes/multi/test_format.py index a10b7220b8aa0..273822de19eb6 100644 --- a/pandas/tests/indexes/multi/test_format.py +++ b/pandas/tests/indexes/multi/test_format.py @@ -5,7 +5,7 @@ import pytest -from pandas.compat import PY3, range, u +from pandas.compat import range, u import pandas as pd from pandas import MultiIndex, compat @@ -67,14 +67,7 @@ def test_repr_roundtrip(): names=['first', 'second']) str(mi) - if PY3: - tm.assert_index_equal(eval(repr(mi)), mi, exact=True) - else: - result = eval(repr(mi)) - # string coerces to unicode - tm.assert_index_equal(result, mi, exact=False) - assert mi.get_level_values('first').inferred_type == 'string' - assert result.get_level_values('first').inferred_type == 'unicode' + tm.assert_index_equal(eval(repr(mi)), mi, exact=True) mi_u = MultiIndex.from_product( [list(u'ab'), range(3)], names=['first', 'second']) @@ -82,23 +75,13 @@ def test_repr_roundtrip(): tm.assert_index_equal(result, mi_u, exact=True) # formatting - if PY3: - str(mi) - else: - compat.text_type(mi) + str(mi) # long format mi = MultiIndex.from_product([list('abcdefg'), range(10)], names=['first', 'second']) - if PY3: - tm.assert_index_equal(eval(repr(mi)), mi, exact=True) - else: - result = eval(repr(mi)) - # string coerces to unicode - tm.assert_index_equal(result, mi, exact=False) - assert mi.get_level_values('first').inferred_type == 'string' - assert result.get_level_values('first').inferred_type == 'unicode' + tm.assert_index_equal(eval(repr(mi)), mi, exact=True) result = eval(repr(mi_u)) tm.assert_index_equal(result, mi_u, exact=True) @@ -107,21 +90,13 @@ def test_repr_roundtrip(): def test_unicode_string_with_unicode(): d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} idx = pd.DataFrame(d).set_index(["a", "b"]).index - - if PY3: - str(idx) - else: - compat.text_type(idx) + str(idx) def test_bytestring_with_unicode(): d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} idx = pd.DataFrame(d).set_index(["a", "b"]).index - - if PY3: - bytes(idx) - else: - str(idx) + bytes(idx) def test_repr_max_seq_item_setting(idx): diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index 
c2af3b2050d8d..e32fa97574155 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -6,7 +6,7 @@ import numpy as np import pytest -from pandas.compat import PY2, lrange +from pandas.compat import lrange import pandas as pd from pandas import ( @@ -255,7 +255,6 @@ def test_getitem_bool_index_single(ind1, ind2): tm.assert_index_equal(idx[ind2], expected) -@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_get_loc(idx): assert idx.get_loc(('foo', 'two')) == 1 assert idx.get_loc(('baz', 'two')) == 3 diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py index f1adeca7245f6..cae055aecde85 100644 --- a/pandas/tests/indexes/period/test_construction.py +++ b/pandas/tests/indexes/period/test_construction.py @@ -2,7 +2,7 @@ import pytest from pandas._libs.tslibs.period import IncompatibleFrequency -from pandas.compat import PY3, lmap, lrange, text_type +from pandas.compat import lmap, lrange, text_type from pandas.core.dtypes.dtypes import PeriodDtype @@ -513,11 +513,7 @@ def test_recreate_from_data(self, freq): def test_map_with_string_constructor(self): raw = [2005, 2007, 2009] index = PeriodIndex(raw, freq='A') - types = str, - - if PY3: - # unicode - types += text_type, + types = [str, text_type] for t in types: expected = Index(lmap(t, raw)) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 773bb91e39376..490a41f023f8e 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -11,8 +11,7 @@ import pytest from pandas._libs.tslib import Timestamp -from pandas.compat import ( - PY3, PY35, PY36, StringIO, lrange, lzip, range, text_type, u, zip) +from pandas.compat import PY36, StringIO, lrange, lzip, range, u, zip from pandas.compat.numpy import np_datetime64_compat from pandas.core.dtypes.common import is_unsigned_integer_dtype @@ -1480,13 +1479,8 @@ def test_get_loc_raises_bad_label(self, method): # Messages vary across versions if PY36: msg = 'not supported between' - elif PY35: - msg = 'unorderable types' else: - if method == 'nearest': - msg = 'unsupported operand' - else: - msg = 'requires scalar valued input' + msg = 'unorderable types' else: msg = 'invalid key' @@ -2067,7 +2061,6 @@ def test_dt_conversion_preserves_name(self, dt_conv): index = pd.Index(['01:02:03', '01:02:04'], name='label') assert index.name == dt_conv(index).name - @pytest.mark.skipif(not PY3, reason="compat test") @pytest.mark.parametrize("index,expected", [ # ASCII # short @@ -2113,53 +2106,6 @@ def test_string_index_repr(self, index, expected): result = repr(index) assert result == expected - @pytest.mark.skipif(PY3, reason="compat test") - @pytest.mark.parametrize("index,expected", [ - # ASCII - # short - (pd.Index(['a', 'bb', 'ccc']), - u"""Index([u'a', u'bb', u'ccc'], dtype='object')"""), - # multiple lines - (pd.Index(['a', 'bb', 'ccc'] * 10), - u"""\ -Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', - u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', - u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'], - dtype='object')"""), - # truncated - (pd.Index(['a', 'bb', 'ccc'] * 100), - u"""\ -Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', - ... 
- u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'], - dtype='object', length=300)"""), - - # Non-ASCII - # short - (pd.Index([u'あ', u'いい', u'ううう']), - u"""Index([u'あ', u'いい', u'ううう'], dtype='object')"""), - # multiple lines - (pd.Index([u'あ', u'いい', u'ううう'] * 10), - (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n" - u" u'いい', u'ううう', u'あ', u'いい', u'ううう', " - u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n" - u" u'ううう', u'あ', u'いい', u'ううう', u'あ', " - u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n" - u" dtype='object')")), - # truncated - (pd.Index([u'あ', u'いい', u'ううう'] * 100), - (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n" - u" ...\n" - u" u'ううう', u'あ', u'いい', u'ううう', u'あ', " - u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n" - u" dtype='object', length=300)"))]) - def test_string_index_repr_compat(self, index, expected): - result = unicode(index) # noqa - assert result == expected - - @pytest.mark.skipif(not PY3, reason="compat test") @pytest.mark.parametrize("index,expected", [ # short (pd.Index([u'あ', u'いい', u'ううう']), @@ -2191,40 +2137,6 @@ def test_string_index_repr_with_unicode_option(self, index, expected): result = repr(index) assert result == expected - @pytest.mark.skipif(PY3, reason="compat test") - @pytest.mark.parametrize("index,expected", [ - # short - (pd.Index([u'あ', u'いい', u'ううう']), - (u"Index([u'あ', u'いい', u'ううう'], " - u"dtype='object')")), - # multiple lines - (pd.Index([u'あ', u'いい', u'ううう'] * 10), - (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい',\n" - u" u'ううう', u'あ', u'いい', u'ううう', " - u"u'あ', u'いい', u'ううう', u'あ',\n" - u" u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい',\n" - u" u'ううう', u'あ', u'いい', u'ううう', " - u"u'あ', u'いい', u'ううう'],\n" - u" dtype='object')")), - # truncated - (pd.Index([u'あ', u'いい', u'ううう'] * 100), - (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい',\n" - u" u'ううう', u'あ',\n" - u" ...\n" - u" u'ううう', u'あ', u'いい', u'ううう', " - u"u'あ', u'いい', u'ううう', u'あ',\n" - u" u'いい', u'ううう'],\n" - u" dtype='object', length=300)"))]) - def test_string_index_repr_with_unicode_option_compat(self, index, - expected): - # Enable Unicode option ----------------------------------------- - with cf.option_context('display.unicode.east_asian_width', True): - result = unicode(index) # noqa - assert result == expected - def test_cached_properties_not_settable(self): index = pd.Index([1, 2, 3]) with pytest.raises(AttributeError, match="Can't set attribute"): @@ -2265,27 +2177,19 @@ def test_argsort(self): index = self.create_index() if PY36: with pytest.raises(TypeError, match="'>|<' not supported"): - result = index.argsort() - elif PY3: - with pytest.raises(TypeError, match="unorderable types"): - result = index.argsort() + index.argsort() else: - result = index.argsort() - expected = np.array(index).argsort() - tm.assert_numpy_array_equal(result, expected, check_dtype=False) + with pytest.raises(TypeError, match="unorderable types"): + index.argsort() def test_numpy_argsort(self): index = self.create_index() if PY36: with pytest.raises(TypeError, match="'>|<' not supported"): - result = np.argsort(index) - elif PY3: - with pytest.raises(TypeError, match="unorderable types"): - result = np.argsort(index) + np.argsort(index) else: - result = np.argsort(index) - expected = index.argsort() - tm.assert_numpy_array_equal(result, expected) + with pytest.raises(TypeError, match="unorderable types"): + np.argsort(index) def test_copy_name(self): # Check that 
"name" argument passed at initialization is honoured @@ -2489,17 +2393,10 @@ def test_print_unicode_columns(self): "c": [7, 8, 9]}) repr(df.columns) # should not raise UnicodeDecodeError - @pytest.mark.parametrize("func,compat_func", [ - (str, text_type), # unicode string - (bytes, str) # byte string - ]) - def test_with_unicode(self, func, compat_func): + @pytest.mark.parametrize("func", [str, bytes]) + def test_with_unicode(self, func): index = Index(lrange(1000)) - - if PY3: - func(index) - else: - compat_func(index) + func(index) def test_intersect_str_dates(self): dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 95fac2f6ae05b..42c4df297813a 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -4,12 +4,12 @@ import pytest from pandas._libs import index as libindex -from pandas.compat import PY3, range +from pandas.compat import range from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd -from pandas import Categorical, IntervalIndex, compat +from pandas import Categorical, IntervalIndex import pandas.core.config as cf from pandas.core.indexes.api import CategoricalIndex, Index import pandas.util.testing as tm @@ -17,9 +17,6 @@ from .common import Base -if PY3: - unicode = lambda x: x - class TestCategoricalIndex(Base): _holder = CategoricalIndex @@ -705,18 +702,12 @@ def test_repr_roundtrip(self): tm.assert_index_equal(eval(repr(ci)), ci, exact=True) # formatting - if PY3: - str(ci) - else: - compat.text_type(ci) + str(ci) # long format # this is not reprable ci = CategoricalIndex(np.random.randint(0, 5, size=100)) - if PY3: - str(ci) - else: - compat.text_type(ci) + str(ci) def test_isin(self): @@ -842,193 +833,102 @@ def test_frame_repr(self): def test_string_categorical_index_repr(self): # short idx = pd.CategoricalIndex(['a', 'bb', 'ccc']) - if PY3: - expected = u"""CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa - assert repr(idx) == expected - else: - expected = u"""CategoricalIndex([u'a', u'bb', u'ccc'], categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')""" # noqa - assert unicode(idx) == expected + expected = u"""CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa + assert repr(idx) == expected # multiple lines idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 10) - if PY3: - expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', + expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa - assert repr(idx) == expected - else: - expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', - u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', - u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', - u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'], - categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')""" # noqa - - assert unicode(idx) == expected + assert repr(idx) == expected # truncated idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 100) - if PY3: - expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', + expected = 
u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', ... 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa - assert repr(idx) == expected - else: - expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', - u'ccc', u'a', - ... - u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', - u'bb', u'ccc'], - categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category', length=300)""" # noqa - - assert unicode(idx) == expected + assert repr(idx) == expected # larger categories idx = pd.CategoricalIndex(list('abcdefghijklmmo')) - if PY3: - expected = u"""CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', + expected = u"""CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'm', 'o'], categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')""" # noqa - assert repr(idx) == expected - else: - expected = u"""CategoricalIndex([u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', u'i', u'j', - u'k', u'l', u'm', u'm', u'o'], - categories=[u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', ...], ordered=False, dtype='category')""" # noqa - - assert unicode(idx) == expected + assert repr(idx) == expected # short idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう']) - if PY3: - expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa - assert repr(idx) == expected - else: - expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa - assert unicode(idx) == expected + expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa + assert repr(idx) == expected # multiple lines idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10) - if PY3: - expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', + expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa - assert repr(idx) == expected - else: - expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', - u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', - u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', - u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'], - categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa - - assert unicode(idx) == expected + assert repr(idx) == expected # truncated idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100) - if PY3: - expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', + expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', ... 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa - assert repr(idx) == expected - else: - expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', - u'ううう', u'あ', - ... 
- u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', - u'いい', u'ううう'], - categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)""" # noqa - - assert unicode(idx) == expected + assert repr(idx) == expected # larger categories idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ')) - if PY3: - expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', + expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', 'す', 'せ', 'そ'], categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa - assert repr(idx) == expected - else: - expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', u'け', u'こ', - u'さ', u'し', u'す', u'せ', u'そ'], - categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')""" # noqa - - assert unicode(idx) == expected + assert repr(idx) == expected # Emable Unicode option ----------------------------------------- with cf.option_context('display.unicode.east_asian_width', True): # short idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう']) - if PY3: - expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa - assert repr(idx) == expected - else: - expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa - assert unicode(idx) == expected + expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa + assert repr(idx) == expected # multiple lines idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10) - if PY3: - expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa - assert repr(idx) == expected - else: - expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', - u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', - u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', - u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', - u'いい', u'ううう', u'あ', u'いい', u'ううう'], - categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa - - assert unicode(idx) == expected + assert repr(idx) == expected # truncated idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100) - if PY3: - expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', ... 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa - assert repr(idx) == expected - else: - expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', - u'いい', u'ううう', u'あ', - ... 
- u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', - u'ううう', u'あ', u'いい', u'ううう'], - categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)""" # noqa - - assert unicode(idx) == expected + assert repr(idx) == expected # larger categories idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ')) - if PY3: - expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', + expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', 'す', 'せ', 'そ'], categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa - assert repr(idx) == expected - else: - expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', - u'け', u'こ', u'さ', u'し', u'す', u'せ', u'そ'], - categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')""" # noqa - - assert unicode(idx) == expected + assert repr(idx) == expected def test_fillna_categorical(self): # GH 11343 diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index 26413f4519eff..0a8c54f8136a7 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -7,7 +7,7 @@ import pytest from pandas._libs.tslibs import Timestamp -from pandas.compat import PY2, range +from pandas.compat import range import pandas as pd from pandas import Float64Index, Index, Int64Index, Series, UInt64Index @@ -155,7 +155,6 @@ def test_constructor(self): result = Index(np.array([np.nan])) assert pd.isna(result.values).all() - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_constructor_invalid(self): # invalid diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py index 583e6bd81bb99..717a63d8c8d49 100644 --- a/pandas/tests/indexes/test_range.py +++ b/pandas/tests/indexes/test_range.py @@ -5,7 +5,7 @@ import numpy as np import pytest -from pandas.compat import PY3, range, u +from pandas.compat import range, u import pandas as pd from pandas import Float64Index, Index, Int64Index, RangeIndex, Series @@ -180,10 +180,7 @@ def test_copy(self): def test_repr(self): i = RangeIndex(5, name='Foo') result = repr(i) - if PY3: - expected = "RangeIndex(start=0, stop=5, step=1, name='Foo')" - else: - expected = "RangeIndex(start=0, stop=5, step=1, name=u'Foo')" + expected = "RangeIndex(start=0, stop=5, step=1, name='Foo')" assert result == expected result = eval(result) diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 280db3b2b3004..dec7708d78e83 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -828,7 +828,7 @@ class TestReplaceSeriesCoercion(CoercionBase): 'datetime64[ns, UTC]', 'datetime64[ns, US/Eastern]', 'timedelta64[ns]'] ) def test_replace_series(self, how, to_key, from_key): - if from_key == 'bool' and how == 'series' and compat.PY3: + if from_key == 'bool' and how == 'series': # doesn't work in PY3, though ...dict_from_bool works fine pytest.skip("doesn't work as in PY3") diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index b9b47338c9de2..c8732f0713b6d 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -542,10 +542,7 @@ def test_integer_positional_indexing(self): slice(2.0, 4), slice(2.0, 4.0)]: - if compat.PY2: - klass = Int64Index - else: - klass = RangeIndex + klass = RangeIndex msg = ("cannot do slice indexing" r" on {klass} 
with these indexers \[(2|4)\.0\] of" " {kind}" diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 03f1975c50d2a..e6ee35572c252 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -10,7 +10,7 @@ import numpy as np import pytest -from pandas.compat import PY2, lrange, range +from pandas.compat import lrange, range from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype @@ -214,8 +214,6 @@ def test_dups_fancy_indexing(self): result = df.loc[['A', 'A', 'E']] tm.assert_frame_equal(result, expected) - @pytest.mark.skipif(PY2, - reason="GH-20770. Py2 unreliable warnings catching.") def test_dups_fancy_indexing2(self): # GH 5835 # dups on index and missing values diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index c4f98b892feb7..ae256a0fded2b 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -5,7 +5,7 @@ import numpy as np import pytest -from pandas.compat import PY2, StringIO, lrange +from pandas.compat import StringIO, lrange import pandas as pd from pandas import DataFrame, Series, Timestamp, date_range @@ -146,8 +146,6 @@ def test_loc_getitem_label_list(self): [Timestamp('20130102'), Timestamp('20130103')], typs=['ts'], axes=0) - @pytest.mark.skipif(PY2, reason=("Catching warnings unreliable with " - "Python 2 (GH #20770)")) def test_loc_getitem_label_list_with_missing(self): self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2], typs=['empty'], fails=KeyError) diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 43bb382ea3f20..e9fd42c2a7216 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -22,7 +22,7 @@ import pandas.compat as compat from pandas.compat import ( - PY3, StringIO, is_platform_32bit, is_platform_windows, lrange, lzip, range, + StringIO, is_platform_32bit, is_platform_windows, lrange, lzip, range, u, zip) import pandas as pd @@ -554,11 +554,6 @@ def test_to_string_with_formatters_unicode(self): assert result == u(' c/\u03c3\n') + '0 1\n1 2\n2 3' def test_east_asian_unicode_false(self): - if PY3: - _rep = repr - else: - _rep = unicode # noqa - # not alighned properly because of east asian width # mid col @@ -568,7 +563,7 @@ def test_east_asian_unicode_false(self): expected = (u" a b\na あ 1\n" u"bb いいい 222\nc う 33333\n" u"ddd ええええええ 4") - assert _rep(df) == expected + assert repr(df) == expected # last col df = DataFrame({'a': [1, 222, 33333, 4], @@ -577,7 +572,7 @@ def test_east_asian_unicode_false(self): expected = (u" a b\na 1 あ\n" u"bb 222 いいい\nc 33333 う\n" u"ddd 4 ええええええ") - assert _rep(df) == expected + assert repr(df) == expected # all col df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'], @@ -586,7 +581,7 @@ def test_east_asian_unicode_false(self): expected = (u" a b\na あああああ あ\n" u"bb い いいい\nc う う\n" u"ddd えええ ええええええ") - assert _rep(df) == expected + assert repr(df) == expected # column name df = DataFrame({'b': [u'あ', u'いいい', u'う', u'ええええええ'], @@ -595,7 +590,7 @@ def test_east_asian_unicode_false(self): expected = (u" b あああああ\na あ 1\n" u"bb いいい 222\nc う 33333\n" u"ddd ええええええ 4") - assert _rep(df) == expected + assert repr(df) == expected # index df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'], @@ -604,7 +599,7 @@ def test_east_asian_unicode_false(self): expected = (u" a b\nあああ あああああ あ\n" u"いいいいいい い いいい\nうう う う\n" u"え えええ ええええええ") - assert _rep(df) == expected + assert 
repr(df) == expected # index name df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'], @@ -617,7 +612,7 @@ def test_east_asian_unicode_false(self): u"い い いいい\n" u"うう う う\n" u"え えええ ええええええ") - assert _rep(df) == expected + assert repr(df) == expected # all df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'], @@ -630,7 +625,7 @@ def test_east_asian_unicode_false(self): u"いいい い いいい\n" u"うう う う\n" u"え えええええ ええ") - assert _rep(df) == expected + assert repr(df) == expected # MultiIndex idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), ( @@ -643,7 +638,7 @@ def test_east_asian_unicode_false(self): u"う え い いいい\n" u"おおお かかかか う う\n" u"き くく えええ ええええええ") - assert _rep(df) == expected + assert repr(df) == expected # truncate with option_context('display.max_rows', 3, 'display.max_columns', 3): @@ -656,21 +651,16 @@ def test_east_asian_unicode_false(self): expected = (u" a ... ああああ\n0 あああああ ... さ\n" u".. ... ... ...\n3 えええ ... せ\n" u"\n[4 rows x 4 columns]") - assert _rep(df) == expected + assert repr(df) == expected df.index = [u'あああ', u'いいいい', u'う', 'aaa'] expected = (u" a ... ああああ\nあああ あああああ ... さ\n" u".. ... ... ...\naaa えええ ... せ\n" u"\n[4 rows x 4 columns]") - assert _rep(df) == expected + assert repr(df) == expected def test_east_asian_unicode_true(self): - if PY3: - _rep = repr - else: - _rep = unicode # noqa - - # Emable Unicode option ----------------------------------------- + # Enable Unicode option ----------------------------------------- with option_context('display.unicode.east_asian_width', True): # mid col @@ -680,7 +670,7 @@ def test_east_asian_unicode_true(self): expected = (u" a b\na あ 1\n" u"bb いいい 222\nc う 33333\n" u"ddd ええええええ 4") - assert _rep(df) == expected + assert repr(df) == expected # last col df = DataFrame({'a': [1, 222, 33333, 4], @@ -689,7 +679,7 @@ def test_east_asian_unicode_true(self): expected = (u" a b\na 1 あ\n" u"bb 222 いいい\nc 33333 う\n" u"ddd 4 ええええええ") - assert _rep(df) == expected + assert repr(df) == expected # all col df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'], @@ -700,7 +690,7 @@ def test_east_asian_unicode_true(self): u"bb い いいい\n" u"c う う\n" u"ddd えええ ええええええ") - assert _rep(df) == expected + assert repr(df) == expected # column name df = DataFrame({'b': [u'あ', u'いいい', u'う', u'ええええええ'], @@ -711,7 +701,7 @@ def test_east_asian_unicode_true(self): u"bb いいい 222\n" u"c う 33333\n" u"ddd ええええええ 4") - assert _rep(df) == expected + assert repr(df) == expected # index df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'], @@ -722,7 +712,7 @@ def test_east_asian_unicode_true(self): u"いいいいいい い いいい\n" u"うう う う\n" u"え えええ ええええええ") - assert _rep(df) == expected + assert repr(df) == expected # index name df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'], @@ -735,7 +725,7 @@ def test_east_asian_unicode_true(self): u"い い いいい\n" u"うう う う\n" u"え えええ ええええええ") - assert _rep(df) == expected + assert repr(df) == expected # all df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'], @@ -748,7 +738,7 @@ def test_east_asian_unicode_true(self): u"いいい い いいい\n" u"うう う う\n" u"え えええええ ええ") - assert _rep(df) == expected + assert repr(df) == expected # MultiIndex idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), ( @@ -761,7 +751,7 @@ def test_east_asian_unicode_true(self): u"う え い いいい\n" u"おおお かかかか う う\n" u"き くく えええ ええええええ") - assert _rep(df) == expected + assert repr(df) == expected # truncate with option_context('display.max_rows', 3, 'display.max_columns', @@ -778,7 +768,7 @@ def test_east_asian_unicode_true(self): u".. ... ... ...\n" u"3 えええ ... 
せ\n" u"\n[4 rows x 4 columns]") - assert _rep(df) == expected + assert repr(df) == expected df.index = [u'あああ', u'いいいい', u'う', 'aaa'] expected = (u" a ... ああああ\n" @@ -786,7 +776,7 @@ def test_east_asian_unicode_true(self): u"... ... ... ...\n" u"aaa えええ ... せ\n" u"\n[4 rows x 4 columns]") - assert _rep(df) == expected + assert repr(df) == expected # ambiguous unicode df = DataFrame({'b': [u'あ', u'いいい', u'¡¡', u'ええええええ'], @@ -797,7 +787,7 @@ def test_east_asian_unicode_true(self): u"bb いいい 222\n" u"c ¡¡ 33333\n" u"¡¡¡ ええええええ 4") - assert _rep(df) == expected + assert repr(df) == expected def test_to_string_buffer_all_unicode(self): buf = StringIO() @@ -992,29 +982,6 @@ def test_frame_info_encoding(self): repr(df.T) fmt.set_option('display.max_rows', 200) - def test_pprint_thing(self): - from pandas.io.formats.printing import pprint_thing as pp_t - - if PY3: - pytest.skip("doesn't work on Python 3") - - assert pp_t('a') == u('a') - assert pp_t(u('a')) == u('a') - assert pp_t(None) == 'None' - assert pp_t(u('\u05d0'), quote_strings=True) == u("u'\u05d0'") - assert pp_t(u('\u05d0'), quote_strings=False) == u('\u05d0') - assert (pp_t((u('\u05d0'), u('\u05d1')), quote_strings=True) == - u("(u'\u05d0', u'\u05d1')")) - assert (pp_t((u('\u05d0'), (u('\u05d1'), u('\u05d2'))), - quote_strings=True) == u("(u'\u05d0', " - "(u'\u05d1', u'\u05d2'))")) - assert (pp_t(('foo', u('\u05d0'), (u('\u05d0'), u('\u05d0'))), - quote_strings=True) == u("(u'foo', u'\u05d0', " - "(u'\u05d0', u'\u05d0'))")) - - # gh-2038: escape embedded tabs in string - assert "\t" not in pp_t("a\tb", escape_chars=("\t", )) - def test_wide_repr(self): with option_context('mode.sim_interactive', True, 'display.show_dimensions', True, @@ -1897,10 +1864,6 @@ def test_unicode_name_in_footer(self): sf._get_footer() # should not raise exception def test_east_asian_unicode_series(self): - if PY3: - _rep = repr - else: - _rep = unicode # noqa # not aligned properly because of east asian width # unicode index @@ -1908,21 +1871,21 @@ def test_east_asian_unicode_series(self): index=[u'あ', u'いい', u'ううう', u'ええええ']) expected = (u"あ a\nいい bb\nううう CCC\n" u"ええええ D\ndtype: object") - assert _rep(s) == expected + assert repr(s) == expected # unicode values s = Series([u'あ', u'いい', u'ううう', u'ええええ'], index=['a', 'bb', 'c', 'ddd']) expected = (u"a あ\nbb いい\nc ううう\n" u"ddd ええええ\ndtype: object") - assert _rep(s) == expected + assert repr(s) == expected # both s = Series([u'あ', u'いい', u'ううう', u'ええええ'], index=[u'ああ', u'いいいい', u'う', u'えええ']) expected = (u"ああ あ\nいいいい いい\nう ううう\n" u"えええ ええええ\ndtype: object") - assert _rep(s) == expected + assert repr(s) == expected # unicode footer s = Series([u'あ', u'いい', u'ううう', u'ええええ'], @@ -1930,7 +1893,7 @@ def test_east_asian_unicode_series(self): name=u'おおおおおおお') expected = (u"ああ あ\nいいいい いい\nう ううう\n" u"えええ ええええ\nName: おおおおおおお, dtype: object") - assert _rep(s) == expected + assert repr(s) == expected # MultiIndex idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), ( @@ -1940,13 +1903,13 @@ def test_east_asian_unicode_series(self): u"う え 22\n" u"おおお かかかか 3333\n" u"き くく 44444\ndtype: int64") - assert _rep(s) == expected + assert repr(s) == expected # object dtype, shorter than unicode repr s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ']) expected = (u"1 1\nAB 22\nNaN 3333\n" u"あああ 44444\ndtype: int64") - assert _rep(s) == expected + assert repr(s) == expected # object dtype, longer than unicode repr s = Series([1, 22, 3333, 44444], @@ -1955,7 +1918,7 @@ def test_east_asian_unicode_series(self): 
u"AB 22\n" u"2011-01-01 00:00:00 3333\n" u"あああ 44444\ndtype: int64") - assert _rep(s) == expected + assert repr(s) == expected # truncate with option_context('display.max_rows', 3): @@ -1965,13 +1928,13 @@ def test_east_asian_unicode_series(self): expected = (u"0 あ\n ... \n" u"3 ええええ\n" u"Name: おおおおおおお, Length: 4, dtype: object") - assert _rep(s) == expected + assert repr(s) == expected s.index = [u'ああ', u'いいいい', u'う', u'えええ'] expected = (u"ああ あ\n ... \n" u"えええ ええええ\n" u"Name: おおおおおおお, Length: 4, dtype: object") - assert _rep(s) == expected + assert repr(s) == expected # Emable Unicode option ----------------------------------------- with option_context('display.unicode.east_asian_width', True): @@ -1981,14 +1944,14 @@ def test_east_asian_unicode_series(self): index=[u'あ', u'いい', u'ううう', u'ええええ']) expected = (u"あ a\nいい bb\nううう CCC\n" u"ええええ D\ndtype: object") - assert _rep(s) == expected + assert repr(s) == expected # unicode values s = Series([u'あ', u'いい', u'ううう', u'ええええ'], index=['a', 'bb', 'c', 'ddd']) expected = (u"a あ\nbb いい\nc ううう\n" u"ddd ええええ\ndtype: object") - assert _rep(s) == expected + assert repr(s) == expected # both s = Series([u'あ', u'いい', u'ううう', u'ええええ'], @@ -1997,7 +1960,7 @@ def test_east_asian_unicode_series(self): u"いいいい いい\n" u"う ううう\n" u"えええ ええええ\ndtype: object") - assert _rep(s) == expected + assert repr(s) == expected # unicode footer s = Series([u'あ', u'いい', u'ううう', u'ええええ'], @@ -2008,7 +1971,7 @@ def test_east_asian_unicode_series(self): u"う ううう\n" u"えええ ええええ\n" u"Name: おおおおおおお, dtype: object") - assert _rep(s) == expected + assert repr(s) == expected # MultiIndex idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'), ( @@ -2019,13 +1982,13 @@ def test_east_asian_unicode_series(self): u"おおお かかかか 3333\n" u"き くく 44444\n" u"dtype: int64") - assert _rep(s) == expected + assert repr(s) == expected # object dtype, shorter than unicode repr s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ']) expected = (u"1 1\nAB 22\nNaN 3333\n" u"あああ 44444\ndtype: int64") - assert _rep(s) == expected + assert repr(s) == expected # object dtype, longer than unicode repr s = Series([1, 22, 3333, 44444], @@ -2034,7 +1997,7 @@ def test_east_asian_unicode_series(self): u"AB 22\n" u"2011-01-01 00:00:00 3333\n" u"あああ 44444\ndtype: int64") - assert _rep(s) == expected + assert repr(s) == expected # truncate with option_context('display.max_rows', 3): @@ -2043,14 +2006,14 @@ def test_east_asian_unicode_series(self): expected = (u"0 あ\n ... \n" u"3 ええええ\n" u"Name: おおおおおおお, Length: 4, dtype: object") - assert _rep(s) == expected + assert repr(s) == expected s.index = [u'ああ', u'いいいい', u'う', u'えええ'] expected = (u"ああ あ\n" u" ... 
\n" u"えええ ええええ\n" u"Name: おおおおおおお, Length: 4, dtype: object") - assert _rep(s) == expected + assert repr(s) == expected # ambiguous unicode s = Series([u'¡¡', u'い¡¡', u'ううう', u'ええええ'], @@ -2059,7 +2022,7 @@ def test_east_asian_unicode_series(self): u"¡¡¡¡いい い¡¡\n" u"¡¡ ううう\n" u"えええ ええええ\ndtype: object") - assert _rep(s) == expected + assert repr(s) == expected def test_float_trim_zeros(self): vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10, diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index 1929817a49b3c..1a28cafa2b520 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -51,13 +51,8 @@ def test_to_csv_defualt_encoding(self): with tm.ensure_clean('test.csv') as path: # the default to_csv encoding in Python 2 is ascii, and that in # Python 3 is uft-8. - if pd.compat.PY2: - # the encoding argument parameter should be utf-8 - with pytest.raises(UnicodeEncodeError, match='ascii'): - df.to_csv(path) - else: - df.to_csv(path) - tm.assert_frame_equal(pd.read_csv(path, index_col=0), df) + df.to_csv(path) + tm.assert_frame_equal(pd.read_csv(path, index_col=0), df) def test_to_csv_quotechar(self): df = DataFrame({'col': [1, 2]}) @@ -494,7 +489,6 @@ def test_to_csv_write_to_open_file(self): with open(path, 'r') as f: assert f.read() == expected - @pytest.mark.skipif(compat.PY2, reason="Test case for python3") def test_to_csv_write_to_open_file_with_newline_py3(self): # see gh-21696 # see gh-20353 @@ -512,24 +506,6 @@ def test_to_csv_write_to_open_file_with_newline_py3(self): with open(path, 'rb') as f: assert f.read() == bytes(expected, 'utf-8') - @pytest.mark.skipif(compat.PY3, reason="Test case for python2") - def test_to_csv_write_to_open_file_with_newline_py2(self): - # see gh-21696 - # see gh-20353 - df = pd.DataFrame({'a': ['x', 'y', 'z']}) - expected_rows = ["x", - "y", - "z"] - expected = ("manual header\n" + - tm.convert_rows_list_to_csv_str(expected_rows)) - with tm.ensure_clean('test.txt') as path: - with open(path, 'wb') as f: - f.write('manual header\n') - df.to_csv(f, header=None, index=None) - - with open(path, 'rb') as f: - assert f.read() == expected - @pytest.mark.parametrize("to_infer", [True, False]) @pytest.mark.parametrize("read_infer", [True, False]) def test_to_csv_compression(self, compression_only, diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 4bec3bca1820b..2efa33b1e95bb 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -6,7 +6,7 @@ from pandas.compat import u import pandas as pd -from pandas import DataFrame, Series, compat +from pandas import DataFrame, Series from pandas.util import testing as tm @@ -32,16 +32,10 @@ def test_to_latex_filename(self, frame): assert df.to_latex() == f.read() # test with utf-8 without encoding option - if compat.PY3: # python3: pandas default encoding is utf-8 - with tm.ensure_clean('test.tex') as path: - df.to_latex(path) - with codecs.open(path, 'r', encoding='utf-8') as f: - assert df.to_latex() == f.read() - else: - # python2 default encoding is ascii, so an error should be raised - with tm.ensure_clean('test.tex') as path: - with pytest.raises(UnicodeEncodeError): - df.to_latex(path) + with tm.ensure_clean('test.tex') as path: + df.to_latex(path) + with codecs.open(path, 'r', encoding='utf-8') as f: + assert df.to_latex() == f.read() def test_to_latex(self, frame): # it works! 
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index 5362274274d72..f034d52b82c4a 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from pandas import DataFrame, Index, compat +from pandas import DataFrame, Index import pandas.util.testing as tm from pandas.io.json import json_normalize @@ -261,14 +261,10 @@ def test_record_prefix(self, state_data): tm.assert_frame_equal(result, expected) def test_non_ascii_key(self): - if compat.PY3: - testjson = ( - b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},' + - b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]' - ).decode('utf8') - else: - testjson = ('[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},' - '{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]') + testjson = ( + b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},' + + b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]' + ).decode('utf8') testdata = { u'sub.A': [1, 3], diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index ed598b730d960..c1861ae152cd9 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -540,10 +540,7 @@ class BinaryThing(object): def __init__(self, hexed): self.hexed = hexed - if compat.PY2: - self.binary = hexed.decode('hex') - else: - self.binary = bytes.fromhex(hexed) + self.binary = bytes.fromhex(hexed) def __str__(self): return self.hexed @@ -1152,9 +1149,6 @@ def test_to_jsonl(self): assert_frame_equal(pd.read_json(result, lines=True), df) def test_latin_encoding(self): - if compat.PY2: - pytest.skip("[unicode] is not implemented as a table column") - # GH 13774 pytest.skip("encoding not implemented in .to_json(), " "xref #13774") diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index 63ba9bc0f0488..5595a959c9a29 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -7,7 +7,6 @@ import calendar import datetime import decimal -from functools import partial import locale import math import re @@ -26,9 +25,6 @@ from pandas import DataFrame, DatetimeIndex, Index, NaT, Series, date_range import pandas.util.testing as tm -json_unicode = (json.dumps if compat.PY3 - else partial(json.dumps, encoding="utf-8")) - def _clean_dict(d): """ @@ -174,8 +170,6 @@ def test_decimal_decode_test_precise(self): decoded = ujson.decode(encoded, precise_float=True) assert sut == decoded - @pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3, - reason="buggy on win-64 for py2") def test_encode_double_tiny_exponential(self): num = 1e-40 assert num == ujson.decode(ujson.encode(num)) @@ -272,7 +266,7 @@ def test_encode_unicode_conversion(self, unicode_input): enc = ujson.encode(unicode_input) dec = ujson.decode(enc) - assert enc == json_unicode(unicode_input) + assert enc == json.dumps(unicode_input) assert dec == json.loads(enc) def test_encode_control_escaping(self): @@ -281,14 +275,14 @@ def test_encode_control_escaping(self): dec = ujson.decode(enc) assert escaped_input == dec - assert enc == json_unicode(escaped_input) + assert enc == json.dumps(escaped_input) def test_encode_unicode_surrogate_pair(self): surrogate_input = "\xf0\x90\x8d\x86" enc = ujson.encode(surrogate_input) dec = ujson.decode(enc) - assert enc == json_unicode(surrogate_input) + assert enc == json.dumps(surrogate_input) assert dec == json.loads(enc) def test_encode_unicode_4bytes_utf8(self): @@ -296,7 
+290,7 @@ def test_encode_unicode_4bytes_utf8(self): enc = ujson.encode(four_bytes_input) dec = ujson.decode(enc) - assert enc == json_unicode(four_bytes_input) + assert enc == json.dumps(four_bytes_input) assert dec == json.loads(enc) def test_encode_unicode_4bytes_utf8highest(self): @@ -305,7 +299,7 @@ def test_encode_unicode_4bytes_utf8highest(self): dec = ujson.decode(enc) - assert enc == json_unicode(four_bytes_input) + assert enc == json.dumps(four_bytes_input) assert dec == json.loads(enc) def test_encode_array_in_array(self): @@ -432,7 +426,7 @@ def test_encode_to_utf8(self): enc = ujson.encode(unencoded, ensure_ascii=False) dec = ujson.decode(enc) - assert enc == json_unicode(unencoded, ensure_ascii=False) + assert enc == json.dumps(unencoded, ensure_ascii=False) assert dec == json.loads(enc) def test_decode_from_unicode(self): @@ -521,11 +515,6 @@ def test_decode_invalid_dict(self, invalid_dict): def test_decode_numeric_int(self, numeric_int_as_str): assert int(numeric_int_as_str) == ujson.decode(numeric_int_as_str) - @pytest.mark.skipif(compat.PY3, reason="only PY2") - def test_encode_unicode_4bytes_utf8_fail(self): - with pytest.raises(OverflowError): - ujson.encode("\xfd\xbf\xbf\xbf\xbf\xbf") - def test_encode_null_character(self): wrapped_input = "31337 \x00 1337" output = ujson.encode(wrapped_input) @@ -657,14 +646,14 @@ def test_decode_number_with_32bit_sign_bit(self, val): def test_encode_big_escape(self): # Make sure no Exception is raised. for _ in range(10): - base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5" + base = '\u00e5'.encode("utf-8") escape_input = base * 1024 * 1024 * 2 ujson.encode(escape_input) def test_decode_big_escape(self): # Make sure no Exception is raised. for _ in range(10): - base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5" + base = '\u00e5'.encode("utf-8") quote = compat.str_to_bytes("\"") escape_input = quote + (base * 1024 * 1024 * 2) + quote diff --git a/pandas/tests/io/msgpack/common.py b/pandas/tests/io/msgpack/common.py index 434d347c5742a..60c1c0db18de8 100644 --- a/pandas/tests/io/msgpack/common.py +++ b/pandas/tests/io/msgpack/common.py @@ -1,9 +1,2 @@ -from pandas.compat import PY3 - -# array compat -if PY3: - frombytes = lambda obj, data: obj.frombytes(data) - tobytes = lambda obj: obj.tobytes() -else: - frombytes = lambda obj, data: obj.fromstring(data) - tobytes = lambda obj: obj.tostring() +frombytes = lambda obj, data: obj.frombytes(data) +tobytes = lambda obj: obj.tobytes() diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py index c089a189ae551..5d1d1ad596ec9 100644 --- a/pandas/tests/io/parser/test_c_parser_only.py +++ b/pandas/tests/io/parser/test_c_parser_only.py @@ -15,7 +15,7 @@ import numpy as np import pytest -from pandas.compat import PY3, BytesIO, StringIO, lrange, range +from pandas.compat import BytesIO, StringIO, lrange, range from pandas.errors import ParserError import pandas.util._test_decorators as td @@ -500,17 +500,11 @@ def __next__(self): def test_buffer_rd_bytes_bad_unicode(c_parser_only): # see gh-22748 - parser = c_parser_only t = BytesIO(b"\xB0") - - if PY3: - msg = "'utf-8' codec can't encode character" - t = TextIOWrapper(t, encoding="ascii", errors="surrogateescape") - else: - msg = "'utf8' codec can't decode byte" - + t = TextIOWrapper(t, encoding="ascii", errors="surrogateescape") + msg = "'utf-8' codec can't encode character" with pytest.raises(UnicodeError, match=msg): - parser.read_csv(t, encoding="UTF-8") + 
c_parser_only.read_csv(t, encoding="UTF-8") @pytest.mark.parametrize("tar_suffix", [".tar", ".tar.gz"]) @@ -572,8 +566,7 @@ def test_file_handles_mmap(c_parser_only, csv1): m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) parser.read_csv(m) - if PY3: - assert not m.closed + assert not m.closed m.close() diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 9060543f1a373..671659c5e4ed0 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -75,9 +75,6 @@ def _set_noconvert_columns(self): def test_bytes_io_input(all_parsers): - if compat.PY2: - pytest.skip("Bytes-related test does not need to work on Python 2.x") - encoding = "cp1255" parser = all_parsers @@ -111,9 +108,7 @@ def test_bad_stream_exception(all_parsers, csv_dir_path): codec = codecs.lookup("utf-8") utf8 = codecs.lookup('utf-8') parser = all_parsers - - msg = ("'utf-8' codec can't decode byte" if compat.PY3 - else "'utf8' codec can't decode byte") + msg = "'utf-8' codec can't decode byte" # Stream must be binary UTF8. with open(path, "rb") as handle, codecs.StreamRecoder( @@ -124,7 +119,6 @@ def test_bad_stream_exception(all_parsers, csv_dir_path): parser.read_csv(stream) -@pytest.mark.skipif(compat.PY2, reason="PY3-only test") def test_read_csv_local(all_parsers, csv1): prefix = u("file:///") if compat.is_platform_windows() else u("file://") parser = all_parsers @@ -957,16 +951,14 @@ def test_utf16_bom_skiprows(all_parsers, sep, encoding): utf8 = "utf-8" with tm.ensure_clean(path) as path: + from io import TextIOWrapper bytes_data = data.encode(encoding) with open(path, "wb") as f: f.write(bytes_data) bytes_buffer = BytesIO(data.encode(utf8)) - - if compat.PY3: - from io import TextIOWrapper - bytes_buffer = TextIOWrapper(bytes_buffer, encoding=utf8) + bytes_buffer = TextIOWrapper(bytes_buffer, encoding=utf8) result = parser.read_csv(path, encoding=encoding, **kwargs) expected = parser.read_csv(bytes_buffer, encoding=utf8, **kwargs) @@ -975,16 +967,10 @@ def test_utf16_bom_skiprows(all_parsers, sep, encoding): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("buffer", [ - False, - pytest.param(True, marks=pytest.mark.skipif( - compat.PY3, reason="Not supported on PY3"))]) -def test_utf16_example(all_parsers, csv_dir_path, buffer): +def test_utf16_example(all_parsers, csv_dir_path): path = os.path.join(csv_dir_path, "utf16_ex.txt") parser = all_parsers - - src = BytesIO(open(path, "rb").read()) if buffer else path - result = parser.read_csv(src, encoding="utf-16", sep="\t") + result = parser.read_csv(path, encoding="utf-16", sep="\t") assert len(result) == 50 @@ -1565,22 +1551,17 @@ def test_iteration_open_handle(all_parsers): kwargs = dict(squeeze=True, header=None) with tm.ensure_clean() as path: - with open(path, "wb" if compat.PY2 else "w") as f: + with open(path, "w") as f: f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG") - with open(path, "rb" if compat.PY2 else "r") as f: + with open(path, "r") as f: for line in f: if "CCC" in line: break - if parser.engine == "c" and compat.PY2: - msg = "Mixing iteration and read methods would lose data" - with pytest.raises(ValueError, match=msg): - parser.read_csv(f, **kwargs) - else: - result = parser.read_csv(f, **kwargs) - expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0) - tm.assert_series_equal(result, expected) + result = parser.read_csv(f, **kwargs) + expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0) + tm.assert_series_equal(result, expected) 
@pytest.mark.parametrize("data,thousands,decimal", [ diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py index c2edff258f1b5..51981b9373867 100644 --- a/pandas/tests/io/parser/test_python_parser_only.py +++ b/pandas/tests/io/parser/test_python_parser_only.py @@ -11,7 +11,6 @@ import pytest -import pandas.compat as compat from pandas.compat import BytesIO, StringIO, u from pandas.errors import ParserError @@ -82,12 +81,10 @@ def test_sniff_delimiter_encoding(python_parser_only, encoding): """ if encoding is not None: + from io import TextIOWrapper data = u(data).encode(encoding) data = BytesIO(data) - - if compat.PY3: - from io import TextIOWrapper - data = TextIOWrapper(data, encoding=encoding) + data = TextIOWrapper(data, encoding=encoding) else: data = StringIO(data) diff --git a/pandas/tests/io/parser/test_quoting.py b/pandas/tests/io/parser/test_quoting.py index b33a1b8448bea..49c1b08974545 100644 --- a/pandas/tests/io/parser/test_quoting.py +++ b/pandas/tests/io/parser/test_quoting.py @@ -9,7 +9,7 @@ import pytest -from pandas.compat import PY2, StringIO, u +from pandas.compat import StringIO, u from pandas.errors import ParserError from pandas import DataFrame @@ -125,10 +125,7 @@ def test_double_quote(all_parsers, doublequote, exp_data): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("quotechar", [ - u('"'), - pytest.param(u('\u0001'), marks=pytest.mark.skipif( - PY2, reason="Python 2.x does not handle unicode well."))]) +@pytest.mark.parametrize("quotechar", [u('"'), u('\u0001')]) def test_quotechar_unicode(all_parsers, quotechar): # see gh-14477 data = "a\n1" diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py index 172bbe0bad4c7..72360a8a7468d 100644 --- a/pandas/tests/io/parser/test_read_fwf.py +++ b/pandas/tests/io/parser/test_read_fwf.py @@ -11,7 +11,6 @@ import numpy as np import pytest -import pandas.compat as compat from pandas.compat import BytesIO, StringIO import pandas as pd @@ -161,9 +160,6 @@ def test_read_csv_compat(): def test_bytes_io_input(): - if not compat.PY3: - pytest.skip("Bytes-related test - only needs to work on Python 3") - result = read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[2, 2], encoding="utf8") expected = DataFrame([["של", "ום"]], columns=["של", "ום"]) @@ -441,9 +437,6 @@ def test_multiple_delimiters(): def test_variable_width_unicode(): - if not compat.PY3: - pytest.skip("Bytes-related test - only needs to work on Python 3") - data = """ שלום שלום ום שלל @@ -567,8 +560,7 @@ def test_fwf_compression(compression_only, infer): kwargs = dict(widths=[5, 5], names=["one", "two"]) expected = read_fwf(StringIO(data), **kwargs) - if compat.PY3: - data = bytes(data, encoding="utf-8") + data = bytes(data, encoding="utf-8") with tm.ensure_clean(filename="tmp." 
+ extension) as path: tm.write_to_compressed(compression, path, data) diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index 3dd8d0449ef5f..cbd36bb0abeda 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -4,7 +4,6 @@ import numpy as np import pytest -from pandas.compat import PY2 from pandas.errors import EmptyDataError import pandas.util._test_decorators as td @@ -34,11 +33,6 @@ def setup_method(self, datapath): col = df.iloc[:, k] if col.dtype == np.int64: df.iloc[:, k] = df.iloc[:, k].astype(np.float64) - elif col.dtype == np.dtype('O'): - if PY2: - f = lambda x: (x.decode('utf-8') if - isinstance(x, str) else x) - df.iloc[:, k] = df.iloc[:, k].apply(f) self.data.append(df) def test_from_file(self): diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index 565db92210b0a..ae9a8300fc72e 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -5,8 +5,6 @@ from numpy.random import randint import pytest -from pandas.compat import PY2 - import pandas as pd from pandas import DataFrame, get_option, read_clipboard from pandas.util import testing as tm @@ -166,14 +164,7 @@ def test_clipboard_copy_tabs_default(self, sep, excel, df, request, mock_clipboard): kwargs = build_kwargs(sep, excel) df.to_clipboard(**kwargs) - if PY2: - # to_clipboard copies unicode, to_csv produces bytes. This is - # expected behavior - result = mock_clipboard[request.node.name].encode('utf-8') - expected = df.to_csv(sep='\t') - assert result == expected - else: - assert mock_clipboard[request.node.name] == df.to_csv(sep='\t') + assert mock_clipboard[request.node.name] == df.to_csv(sep='\t') # Tests reading of white space separated tables @pytest.mark.parametrize('sep', [None, 'default']) diff --git a/pandas/tests/io/test_gbq.py b/pandas/tests/io/test_gbq.py index d3569af8d7786..8150a058e5c5a 100644 --- a/pandas/tests/io/test_gbq.py +++ b/pandas/tests/io/test_gbq.py @@ -9,7 +9,7 @@ from pandas.compat import range import pandas as pd -from pandas import DataFrame, compat +from pandas import DataFrame import pandas.util.testing as tm api_exceptions = pytest.importorskip("google.api_core.exceptions") @@ -21,10 +21,7 @@ PRIVATE_KEY_JSON_PATH = None PRIVATE_KEY_JSON_CONTENTS = None -if compat.PY3: - DATASET_ID = 'pydata_pandas_bq_testing_py3' -else: - DATASET_ID = 'pydata_pandas_bq_testing_py2' +DATASET_ID = 'pydata_pandas_bq_testing_py3' TABLE_ID = 'new_test' DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID) diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index b2b0c21c81263..591eea7203094 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -10,7 +10,7 @@ import pytest from pandas.compat import ( - PY3, BytesIO, StringIO, is_platform_windows, map, reload, zip) + BytesIO, StringIO, is_platform_windows, map, reload, zip) from pandas.errors import ParserError import pandas.util._test_decorators as td @@ -88,8 +88,7 @@ class TestReadHtml(object): def set_files(self, datapath): self.spam_data = datapath('io', 'data', 'spam.html') self.spam_data_kwargs = {} - if PY3: - self.spam_data_kwargs['encoding'] = 'UTF-8' + self.spam_data_kwargs['encoding'] = 'UTF-8' self.banklist_data = datapath("io", "data", "banklist.html") @pytest.fixture(autouse=True, scope="function") diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index 375557c43a3ae..d72bc3cf3fbae 100644 --- 
a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -8,7 +8,7 @@ import pytest from pandas._libs.tslib import iNaT -from pandas.compat import PY3, u +from pandas.compat import u from pandas.errors import PerformanceWarning import pandas @@ -71,7 +71,7 @@ def check_arbitrary(a, b): # Temp, # Categorical.categories is changed from str to bytes in PY3 # maybe the same as GH 13591 - if PY3 and b.categories.inferred_type == 'string': + if b.categories.inferred_type == 'string': pass else: tm.assert_categorical_equal(a, b) @@ -930,7 +930,7 @@ def test_msgpacks_legacy(self, current_packers_data, all_packers_data, version = os.path.basename(os.path.dirname(legacy_packer)) # GH12142 0.17 files packed in P2 can't be read in P3 - if (compat.PY3 and version.startswith('0.17.') and + if (version.startswith('0.17.') and legacy_packer.split('.')[-4][-1] == '2'): msg = "Files packed in Py2 can't be read in Py3 ({})" pytest.skip(msg.format(version)) diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 01a47a67ad1b6..df02811402b30 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -7,7 +7,6 @@ import numpy as np import pytest -from pandas.compat import PY3 import pandas.util._test_decorators as td import pandas as pd @@ -253,10 +252,9 @@ def test_columns_dtypes_invalid(self, engine): df.columns = [0, 1] self.check_error_on_write(df, engine, ValueError) - if PY3: - # bytes on PY3, on PY2 these are str - df.columns = [b'foo', b'bar'] - self.check_error_on_write(df, engine, ValueError) + # bytes + df.columns = [b'foo', b'bar'] + self.check_error_on_write(df, engine, ValueError) # python object df.columns = [datetime.datetime(2011, 1, 1, 0, 0), diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index b4befadaddc42..dde7b15bf9687 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -20,7 +20,7 @@ import pytest -from pandas.compat import PY3, is_platform_little_endian +from pandas.compat import is_platform_little_endian import pandas.util._test_decorators as td import pandas as pd @@ -472,14 +472,3 @@ def test_read(self, protocol, get_random_path): df.to_pickle(path, protocol=protocol) df2 = pd.read_pickle(path) tm.assert_frame_equal(df, df2) - - @pytest.mark.parametrize('protocol', [3, 4]) - @pytest.mark.skipif(PY3, reason="Testing invalid parameters for Python 2") - def test_read_bad_versions(self, protocol, get_random_path): - # For Python 2, HIGHEST_PROTOCOL should be 2. 
- msg = ("pickle protocol {protocol} asked for; the highest available " - "protocol is 2").format(protocol=protocol) - with pytest.raises(ValueError, match=msg): - with tm.ensure_clean(get_random_path) as path: - df = tm.makeDataFrame() - df.to_pickle(path, protocol=protocol) diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 77a6a386bbb14..41d247798bd42 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -1036,18 +1036,8 @@ def check(format, index): # unicode index = tm.makeUnicodeIndex - if compat.PY3: - check('table', index) - check('fixed', index) - else: - - # only support for fixed types (and they have a perf warning) - pytest.raises(TypeError, check, 'table', index) - - # PerformanceWarning - with catch_warnings(record=True): - simplefilter("ignore", pd.errors.PerformanceWarning) - check('fixed', index) + check('table', index) + check('fixed', index) @pytest.mark.skipif(not is_platform_little_endian(), reason="reason platform is not little endian") @@ -1067,9 +1057,6 @@ def test_encoding(self): def test_latin_encoding(self): - if compat.PY2: - pytest.skip("[unicode] is not implemented as a table column") - values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'], [b'E\xc9, 17', b'a', b'b', b'c'], [b'EE, 17', b'', b'a', b'b', b'c'], @@ -1999,10 +1986,6 @@ def test_unimplemented_dtypes_table_columns(self): dtypes = [('date', datetime.date(2001, 1, 2))] - # py3 ok for unicode - if not compat.PY3: - dtypes.append(('unicode', u('\\u03c3'))) - # currently not supported dtypes #### for n, f in dtypes: df = tm.makeDataFrame() @@ -4405,14 +4388,8 @@ def test_to_hdf_with_object_column_names(self): types_should_fail = [tm.makeIntIndex, tm.makeFloatIndex, tm.makeDateIndex, tm.makeTimedeltaIndex, tm.makePeriodIndex] - types_should_run = [tm.makeStringIndex, tm.makeCategoricalIndex] - - if compat.PY3: - types_should_run.append(tm.makeUnicodeIndex) - else: - # TODO: Add back to types_should_fail - # https://github.com/pandas-dev/pandas/issues/20907 - pass + types_should_run = [tm.makeStringIndex, tm.makeCategoricalIndex, + tm.makeUnicodeIndex] for index in types_should_fail: df = DataFrame(np.random.randn(10, 2), columns=index(2)) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index d51d9418a370b..daf622af889a3 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -28,7 +28,7 @@ import pytest import pandas.compat as compat -from pandas.compat import PY2, PY36, lrange, range, string_types +from pandas.compat import PY36, lrange, range, string_types from pandas.core.dtypes.common import ( is_datetime64_dtype, is_datetime64tz_dtype) @@ -2113,7 +2113,6 @@ def _get_sqlite_column_type(self, table, column): return ctype raise ValueError('Table %s, column %s not found' % (table, column)) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_dtype(self): if self.flavor == 'mysql': pytest.skip('Not applicable to MySQL legacy') diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index e6bcd7672c5b8..118ad716c3669 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -14,7 +14,7 @@ import pytest import pandas.compat as compat -from pandas.compat import PY3, ResourceWarning, iterkeys +from pandas.compat import ResourceWarning, iterkeys from pandas.core.dtypes.common import is_categorical_dtype @@ -1581,13 +1581,12 @@ def test_all_none_exception(self, version): def test_invalid_file_not_written(self, version): content 
= 'Here is one __�__ Another one __·__ Another one __½__' df = DataFrame([content], columns=['invalid']) - expected_exc = UnicodeEncodeError if PY3 else UnicodeDecodeError with tm.ensure_clean() as path: msg1 = (r"'latin-1' codec can't encode character '\\ufffd'" r" in position 14: ordinal not in range\(256\)") msg2 = ("'ascii' codec can't decode byte 0xef in position 14:" r" ordinal not in range\(128\)") - with pytest.raises(expected_exc, match=r'{}|{}'.format( + with pytest.raises(UnicodeEncodeError, match=r'{}|{}'.format( msg1, msg2)): with tm.assert_produces_warning(ResourceWarning): df.to_stata(path) @@ -1608,6 +1607,4 @@ def test_strl_latin1(self): for gso in gsos.split(b'GSO')[1:]: val = gso.split(b'\x00')[-2] size = gso[gso.find(b'\x82') + 1] - if not PY3: - size = ord(size) assert len(val) == size - 1 diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index b9a29cc4ac27e..3a3618e2516b2 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -6,7 +6,7 @@ import numpy as np import pytest -from pandas.compat import PY3, lrange, zip +from pandas.compat import lrange, zip import pandas.util._test_decorators as td from pandas import DataFrame, Index, NaT, Series, isna @@ -1078,7 +1078,6 @@ def test_irreg_dtypes(self): _, ax = self.plt.subplots() _check_plot_works(df.plot, ax=ax) - @pytest.mark.xfail(reason="fails with py2.7.15", strict=False) @pytest.mark.slow def test_time(self): t = datetime(1, 1, 1, 3, 30, 0) @@ -1563,7 +1562,7 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs): # TODO(statsmodels 0.10.0): Remove the statsmodels check # https://github.com/pandas-dev/pandas/issues/24088 # https://github.com/statsmodels/statsmodels/issues/4772 - if PY3 and 'statsmodels' not in sys.modules: + if 'statsmodels' not in sys.modules: with ensure_clean(return_filelike=True) as path: pickle.dump(fig, path) finally: diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 7346a3b09aecf..f6ec3f99a7b61 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -10,7 +10,7 @@ from numpy.random import rand, randn import pytest -from pandas.compat import PY3, lmap, lrange, lzip, range, u, zip +from pandas.compat import lmap, lrange, lzip, range, u, zip import pandas.util._test_decorators as td from pandas.core.dtypes.api import is_list_like @@ -1433,17 +1433,6 @@ def test_boxplot(self): np.arange(1, len(numeric_cols) + 1)) assert len(ax.lines) == self.bp_n_objects * len(numeric_cols) - # different warning on py3 - if not PY3: - with tm.assert_produces_warning(UserWarning): - axes = _check_plot_works(df.plot.box, subplots=True, logy=True) - - self._check_axes_shape(axes, axes_num=3, layout=(1, 3)) - self._check_ax_scales(axes, yaxis='log') - for ax, label in zip(axes, labels): - self._check_text_labels(ax.get_xticklabels(), [label]) - assert len(ax.lines) == self.bp_n_objects - axes = series.plot.box(rot=40) self._check_ticks_props(axes, xrot=40, yrot=0) tm.close() diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index fbf7f610688ba..d792ee4ec94ad 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import ( Categorical, DataFrame, DatetimeIndex, Index, NaT, Period, PeriodIndex, - RangeIndex, Series, Timedelta, TimedeltaIndex, Timestamp, 
compat, isna, + RangeIndex, Series, Timedelta, TimedeltaIndex, Timestamp, isna, timedelta_range, to_timedelta) from pandas.core import nanops import pandas.util.testing as tm @@ -1146,7 +1146,6 @@ def test_mode_intoverflow(self, dropna, expected1, expected2): expected2 = Series(expected2, dtype=np.uint64) tm.assert_series_equal(result, expected2) - @pytest.mark.skipif(not compat.PY3, reason="only PY3") def test_mode_sortwarning(self): # Check for the warning that is raised when the mode # results cannot be sorted diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index ccd50998e39b1..d047e6a6f089f 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -10,7 +10,7 @@ from numpy.random import randn import pytest -from pandas.compat import PY2, Iterable, StringIO, iteritems +from pandas.compat import Iterable, StringIO, iteritems from pandas.core.dtypes.dtypes import CategoricalDtype @@ -2303,13 +2303,7 @@ def test_concat_order(self): for i in range(100)] result = pd.concat(dfs, sort=True).columns - - if PY2: - # Different sort order between incomparable objects between - # python 2 and python3 via Index.union. - expected = dfs[1].columns - else: - expected = dfs[0].columns + expected = dfs[0].columns tm.assert_index_equal(result, expected) def test_concat_datetime_timezone(self): @@ -2359,7 +2353,6 @@ def test_concat_datetime_timezone(self): index=idx1.append(idx1)) tm.assert_frame_equal(result, expected) - @pytest.mark.skipif(PY2, reason="Unhashable Decimal dtype") def test_concat_different_extension_dtypes_upcasts(self): a = pd.Series(pd.core.arrays.integer_array([1, 2])) b = pd.Series(to_decimal([1, 2])) diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 42ba9bbd87e52..7a324df163a32 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -386,7 +386,6 @@ def test_unit_parser(self, units, np_unit, wrapper): result = Timedelta('2{}'.format(unit)) assert result == expected - @pytest.mark.skipif(compat.PY2, reason="requires python3.5 or higher") @pytest.mark.parametrize('unit', ['Y', 'y', 'M']) def test_unit_m_y_deprecated(self, unit): with tm.assert_produces_warning(FutureWarning) as w1: diff --git a/pandas/tests/scalar/timestamp/test_comparisons.py b/pandas/tests/scalar/timestamp/test_comparisons.py index 74dd52c48153f..9a76896b23deb 100644 --- a/pandas/tests/scalar/timestamp/test_comparisons.py +++ b/pandas/tests/scalar/timestamp/test_comparisons.py @@ -5,7 +5,7 @@ import numpy as np import pytest -from pandas.compat import PY2, long +from pandas.compat import long from pandas import Timestamp @@ -118,14 +118,8 @@ def test_cant_compare_tz_naive_w_aware(self, utc_fixture): with pytest.raises(TypeError): b >= a - if PY2: - with pytest.raises(TypeError): - a == b.to_pydatetime() - with pytest.raises(TypeError): - a.to_pydatetime() == b - else: - assert not a == b.to_pydatetime() - assert not a.to_pydatetime() == b + assert not a == b.to_pydatetime() + assert not a.to_pydatetime() == b def test_timestamp_compare_scalars(self): # case where ndim == 0 diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index b55d00b44fd67..0466deb4a29a0 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -14,7 +14,7 @@ from pandas._libs.tslibs import conversion from 
pandas._libs.tslibs.timezones import dateutil_gettz as gettz, get_timezone -from pandas.compat import PY2, PY3, long +from pandas.compat import long from pandas.compat.numpy import np_datetime64_compat from pandas.errors import OutOfBoundsDatetime import pandas.util._test_decorators as td @@ -125,13 +125,11 @@ def test_names(self, data, time_locale): # Work around https://github.com/pandas-dev/pandas/issues/22342 # different normalizations + expected_day = unicodedata.normalize("NFD", expected_day) + expected_month = unicodedata.normalize("NFD", expected_month) - if not PY2: - expected_day = unicodedata.normalize("NFD", expected_day) - expected_month = unicodedata.normalize("NFD", expected_month) - - result_day = unicodedata.normalize("NFD", result_day,) - result_month = unicodedata.normalize("NFD", result_month) + result_day = unicodedata.normalize("NFD", result_day,) + result_month = unicodedata.normalize("NFD", result_month) assert result_day == expected_day assert result_month == expected_month @@ -705,33 +703,12 @@ def test_basics_nanos(self): [946688461000000000 / long(1000000), dict(unit='ms')], [946688461000000000 / long(1000000000), dict(unit='s')], [10957, dict(unit='D', h=0)], - pytest.param((946688461000000000 + 500000) / long(1000000000), - dict(unit='s', us=499, ns=964), - marks=pytest.mark.skipif(not PY3, - reason='using truediv, so these' - ' are like floats')), - pytest.param((946688461000000000 + 500000000) / long(1000000000), - dict(unit='s', us=500000), - marks=pytest.mark.skipif(not PY3, - reason='using truediv, so these' - ' are like floats')), - pytest.param((946688461000000000 + 500000) / long(1000000), - dict(unit='ms', us=500), - marks=pytest.mark.skipif(not PY3, - reason='using truediv, so these' - ' are like floats')), - pytest.param((946688461000000000 + 500000) / long(1000000000), - dict(unit='s'), - marks=pytest.mark.skipif(PY3, - reason='get chopped in py2')), - pytest.param((946688461000000000 + 500000000) / long(1000000000), - dict(unit='s'), - marks=pytest.mark.skipif(PY3, - reason='get chopped in py2')), - pytest.param((946688461000000000 + 500000) / long(1000000), - dict(unit='ms'), - marks=pytest.mark.skipif(PY3, - reason='get chopped in py2')), + [(946688461000000000 + 500000) / long(1000000000), + dict(unit='s', us=499, ns=964)], + [(946688461000000000 + 500000000) / long(1000000000), + dict(unit='s', us=500000)], + [(946688461000000000 + 500000) / long(1000000), + dict(unit='ms', us=500)], [(946688461000000000 + 500000) / long(1000), dict(unit='us', us=500)], [(946688461000000000 + 500000000) / long(1000000), dict(unit='ms', us=500000)], diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py index adcf66200a672..8a174c4ecba61 100644 --- a/pandas/tests/scalar/timestamp/test_unary_ops.py +++ b/pandas/tests/scalar/timestamp/test_unary_ops.py @@ -8,7 +8,7 @@ from pandas._libs.tslibs import conversion from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG -from pandas.compat import PY3, PY36 +from pandas.compat import PY36 import pandas.util._test_decorators as td from pandas import NaT, Timestamp @@ -281,10 +281,9 @@ def test_replace_tzinfo(self): result_dt = dt.replace(tzinfo=tzinfo) result_pd = Timestamp(dt).replace(tzinfo=tzinfo) - if PY3: - # datetime.timestamp() converts in the local timezone - with tm.set_timezone('UTC'): - assert result_dt.timestamp() == result_pd.timestamp() + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + assert 
result_dt.timestamp() == result_pd.timestamp() assert result_dt == result_pd assert result_dt == result_pd.to_pydatetime() @@ -292,10 +291,9 @@ def test_replace_tzinfo(self): result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None) result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None) - if PY3: - # datetime.timestamp() converts in the local timezone - with tm.set_timezone('UTC'): - assert result_dt.timestamp() == result_pd.timestamp() + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + assert result_dt.timestamp() == result_pd.timestamp() assert result_dt == result_pd assert result_dt == result_pd.to_pydatetime() @@ -369,9 +367,8 @@ def test_timestamp(self): # utsc is a different representation of the same time assert tsc.timestamp() == utsc.timestamp() - if PY3: - # datetime.timestamp() converts in the local timezone - with tm.set_timezone('UTC'): - # should agree with datetime.timestamp method - dt = ts.to_pydatetime() - assert dt.timestamp() == ts.timestamp() + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + # should agree with datetime.timestamp method + dt = ts.to_pydatetime() + assert dt.timestamp() == ts.timestamp() diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 13195a0d81d9c..04256c084a7b4 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -9,13 +9,12 @@ from numpy import nan import pytest -from pandas.compat import PY2, PY35, is_platform_windows, lrange, range +from pandas.compat import lrange, range import pandas.util._test_decorators as td import pandas as pd from pandas import ( - Categorical, CategoricalIndex, DataFrame, Series, compat, date_range, isna, - notna) + Categorical, CategoricalIndex, DataFrame, Series, date_range, isna, notna) from pandas.api.types import is_scalar from pandas.core.index import MultiIndex from pandas.core.indexes.datetimes import Timestamp @@ -285,9 +284,6 @@ def test_numpy_round(self): with pytest.raises(ValueError, match=msg): np.round(s, decimals=0, out=s) - @pytest.mark.xfail( - PY2 and is_platform_windows(), reason="numpy/numpy#7882", - raises=AssertionError, strict=True) def test_numpy_round_nan(self): # See gh-14197 s = Series([1.53, np.nan, 0.06]) @@ -297,10 +293,6 @@ def test_numpy_round_nan(self): assert_series_equal(result, expected) def test_built_in_round(self): - if not compat.PY3: - pytest.skip( - 'build in round cannot be overridden prior to Python 3') - s = Series([1.123, 2.123, 3.123], index=lrange(3)) result = round(s) expected_rounded0 = Series([1., 2., 3.], index=lrange(3)) @@ -497,8 +489,6 @@ def test_dot(self): with pytest.raises(ValueError, match=msg): a.dot(b.T) - @pytest.mark.skipif(not PY35, - reason='matmul supported for Python>=3.5') def test_matmul(self): # matmul test is for GH #10259 a = Series(np.random.randn(4), index=['p', 'q', 'r', 's']) @@ -771,7 +761,6 @@ def test_isin_empty(self, empty): result = s.isin(empty) tm.assert_series_equal(expected, result) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_ptp(self): # GH21614 N = 1000 diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 687ed59772d18..e1bb5b66c080e 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -5,7 +5,7 @@ import pytest import pandas as pd -from pandas import Series, compat +from pandas import Series from pandas.core.indexes.period 
import IncompatibleFrequency import pandas.util.testing as tm @@ -35,7 +35,7 @@ def test_flex_method_equivalence(self, opname, ts): other = ts[1](tser) check_reverse = ts[2] - if opname == 'div' and compat.PY3: + if opname == 'div': pytest.skip('div test only for Py3') op = getattr(Series, opname) diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index a916cf300653a..a05bd2965fb6e 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -17,7 +17,7 @@ import pandas as pd from pandas import ( DataFrame, DatetimeIndex, Index, PeriodIndex, Series, TimedeltaIndex, - bdate_range, compat, date_range, period_range, timedelta_range) + bdate_range, date_range, period_range, timedelta_range) from pandas.core.arrays import PeriodArray import pandas.core.common as com import pandas.util.testing as tm @@ -353,9 +353,8 @@ def test_dt_accessor_datetime_name_accessors(self, time_locale): expected = Series([month.capitalize() for month in expected_months]) # work around https://github.com/pandas-dev/pandas/issues/22342 - if not compat.PY2: - result = result.str.normalize("NFD") - expected = expected.str.normalize("NFD") + result = result.str.normalize("NFD") + expected = expected.str.normalize("NFD") tm.assert_series_equal(result, expected) @@ -363,9 +362,8 @@ def test_dt_accessor_datetime_name_accessors(self, time_locale): result = s_date.month_name(locale=time_locale) expected = expected.capitalize() - if not compat.PY2: - result = unicodedata.normalize("NFD", result) - expected = unicodedata.normalize("NFD", expected) + result = unicodedata.normalize("NFD", result) + expected = unicodedata.normalize("NFD", expected) assert result == expected diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index 735b8553b14d3..718fe8ad2f40b 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -177,12 +177,6 @@ def test_astype_unicode(self): former_encoding = None - if not compat.PY3: - # In Python, we can force the default encoding for this test - former_encoding = sys.getdefaultencoding() - reload(sys) # noqa - - sys.setdefaultencoding("utf-8") if sys.getdefaultencoding() == "utf-8": test_series.append(Series([u('野菜食べないとやばい') .encode("utf-8")])) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index ef9e575e60385..6b8acdc6e3f5c 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -10,7 +10,7 @@ import pytz from pandas._libs.tslib import iNaT -from pandas.compat import PY2, range +from pandas.compat import range from pandas.errors import PerformanceWarning import pandas.util._test_decorators as td @@ -654,7 +654,6 @@ def test_timedelta64_nan(self): # expected = (datetime_series >= -0.5) & (datetime_series <= 0.5) # assert_series_equal(selector, expected) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_dropna_empty(self): s = Series([]) assert len(s.dropna()) == 0 diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 2f96fe906d980..b7618d2479a47 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -7,7 +7,6 @@ import numpy as np import pytest -import pandas.compat as compat from pandas.compat import range import pandas as pd @@ -665,7 +664,8 @@ def test_operators_corner(self): index=self.ts.index[:-5], name='ts') 
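# --- illustrative aside (not part of the patch): the NFD workaround kept above ---
# Locale-generated day/month names can come back in composed (NFC) or
# decomposed (NFD) form depending on the platform, so the tests normalize
# both sides before comparing (pandas-dev/pandas#22342).  A minimal sketch,
# assuming any accented name; 'fevrier' with an acute accent is only an example:
import unicodedata

composed = "f\u00e9vrier"      # precomposed e-acute (NFC form)
decomposed = "fe\u0301vrier"   # 'e' followed by a combining acute (NFD form)
assert composed != decomposed  # renders the same, but the code points differ
assert (unicodedata.normalize("NFD", composed)
        == unicodedata.normalize("NFD", decomposed))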
tm.assert_series_equal(added[:-5], expected) - pairings = [] + pairings = [(Series.div, operator.truediv, 1), + (Series.rdiv, lambda x, y: operator.truediv(y, x), 1)] for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']: fv = 0 lop = getattr(Series, op) @@ -675,12 +675,6 @@ def test_operators_corner(self): requiv = lambda x, y, op=op: getattr(operator, op)(y, x) pairings.append((lop, lequiv, fv)) pairings.append((rop, requiv, fv)) - if compat.PY3: - pairings.append((Series.div, operator.truediv, 1)) - pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x), 1)) - else: - pairings.append((Series.div, operator.div, 1)) - pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1)) @pytest.mark.parametrize('op, equiv_op, fv', pairings) def test_operators_combine(self, op, equiv_op, fv): diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py index f92a5490ebcb5..c6a149bc0c296 100644 --- a/pandas/tests/series/test_rank.py +++ b/pandas/tests/series/test_rank.py @@ -9,7 +9,7 @@ from pandas._libs.algos import Infinity, NegInfinity from pandas._libs.tslib import iNaT import pandas.compat as compat -from pandas.compat import PY2, product +from pandas.compat import product import pandas.util._test_decorators as td from pandas import NaT, Series, Timestamp, date_range @@ -203,7 +203,6 @@ def test_rank_categorical(self): assert_series_equal(na_ser.rank(na_option='bottom', pct=True), exp_bot) assert_series_equal(na_ser.rank(na_option='keep', pct=True), exp_keep) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_rank_signature(self): s = Series([0, 1]) s.rank(method='average') diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index 842207f2a572f..b8a204011736f 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -5,7 +5,6 @@ import numpy as np -import pandas.compat as compat from pandas.compat import lrange, range, u import pandas as pd @@ -156,17 +155,11 @@ def test_repr_max_rows(self): def test_unicode_string_with_unicode(self): df = Series([u("\u05d0")], name=u("\u05d1")) - if compat.PY3: - str(df) - else: - compat.text_type(df) + str(df) def test_bytestring_with_unicode(self): df = Series([u("\u05d0")], name=u("\u05d1")) - if compat.PY3: - bytes(df) - else: - str(df) + bytes(df) def test_timeseries_repr_object_dtype(self): index = Index([datetime(2000, 1, 1) + timedelta(i) @@ -225,18 +218,8 @@ def __unicode__(self): idx = pd.Index(cat) ser = idx.to_series() - if compat.PY3: - # no reloading of sys, just check that the default (utf8) works - # as expected - repr(ser) - str(ser) - - else: - # set sys.defaultencoding to ascii, then change it back after - # the test - with tm.set_defaultencoding('ascii'): - repr(ser) - str(ser) + repr(ser) + str(ser) def test_categorical_repr(self): a = Series(Categorical([1, 2, 3, 4])) diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py index 162fa4ac9ab52..000909d10a26c 100644 --- a/pandas/tests/series/test_sorting.py +++ b/pandas/tests/series/test_sorting.py @@ -5,8 +5,6 @@ import numpy as np import pytest -from pandas.compat import PY2 - from pandas import Categorical, DataFrame, IntervalIndex, MultiIndex, Series import pandas.util.testing as tm from pandas.util.testing import assert_almost_equal, assert_series_equal @@ -90,7 +88,6 @@ def test_sort_values(self): with pytest.raises(ValueError, match=msg): s.sort_values(inplace=True) - @pytest.mark.skipif(PY2, reason="pytest.raises match 
regex fails") def test_sort_index(self): rindex = list(self.ts.index) random.shuffle(rindex) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index b6896685dd474..0f7902aeefe44 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -8,7 +8,7 @@ from pandas._libs.tslib import iNaT from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime -from pandas.compat import PY2, StringIO, lrange, product +from pandas.compat import StringIO, lrange, product from pandas.errors import NullFrequencyError import pandas.util._test_decorators as td @@ -867,7 +867,6 @@ def test_between_time_formats(self): for time_string in strings: assert len(ts.between_time(*time_string)) == expected_length - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_between_time_axis(self): # issue 8839 rng = date_range('1/1/2000', periods=100, freq='10min') diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 888d1fa1bfe45..954dd85f16c28 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -7,7 +7,7 @@ import pytest from pandas._libs.sparse import BlockIndex, IntIndex -from pandas.compat import PY2, lrange +from pandas.compat import lrange from pandas.errors import PerformanceWarning import pandas as pd @@ -881,7 +881,6 @@ def test_describe(self, float_frame): str(float_frame) desc = float_frame.describe() # noqa - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_join(self, float_frame): left = float_frame.loc[:, ['A', 'B']] right = float_frame.loc[:, ['C', 'D']] diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 083307371b699..232771750838e 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -11,7 +11,7 @@ from pandas._libs import ( algos as libalgos, groupby as libgroupby, hashtable as ht) -from pandas.compat import PY2, lrange, range +from pandas.compat import lrange, range from pandas.compat.numpy import np_array_datetime64_compat import pandas.util._test_decorators as td @@ -224,7 +224,6 @@ def test_factorize_tuple_list(self, data, expected_label, expected_level): dtype=object) tm.assert_numpy_array_equal(result[1], expected_level_array) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_complex_sorting(self): # gh 12666 - check no segfault x17 = np.array([complex(i) for i in range(17)], dtype=object) diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index d6c6fdd312d3e..feacbd11a1ef6 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -9,7 +9,6 @@ import pytest from pandas._libs.tslib import iNaT -import pandas.compat as compat from pandas.compat import PYPY, StringIO, long from pandas.compat.numpy import np_array_datetime64_compat @@ -35,8 +34,6 @@ def test_string_methods_dont_fail(self): repr(self.container) str(self.container) bytes(self.container) - if not compat.PY3: - unicode(self.container) # noqa def test_tricky_container(self): if not hasattr(self, 'unicode_container'): @@ -44,8 +41,6 @@ def test_tricky_container(self): repr(self.unicode_container) str(self.unicode_container) bytes(self.unicode_container) - if not compat.PY3: - unicode(self.unicode_container) # noqa class CheckImmutable(object): diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py index d1a3ee43a4623..ec98fad525ce2 100644 --- a/pandas/tests/test_compat.py +++ 
b/pandas/tests/test_compat.py @@ -8,7 +8,7 @@ import pytest from pandas.compat import ( - PY2, builtins, filter, get_range_parameters, iteritems, iterkeys, + builtins, filter, get_range_parameters, iteritems, iterkeys, itervalues, lfilter, lmap, lrange, lzip, map, next, range, re_type, zip) @@ -81,12 +81,7 @@ class TestCompatFunctions(object): 'start,stop,step', [(0, 10, 2), (11, -2, -1), (0, -5, 1), (2, 4, 8)]) def test_get_range_parameters(self, start, stop, step): rng = range(start, stop, step) - if PY2 and len(rng) == 0: - start_expected, stop_expected, step_expected = 0, 0, 1 - elif PY2 and len(rng) == 1: - start_expected, stop_expected, step_expected = start, start + 1, 1 - else: - start_expected, stop_expected, step_expected = start, stop, step + start_expected, stop_expected, step_expected = start, stop, step start_result, stop_result, step_result = get_range_parameters(rng) assert start_result == start_expected diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py index baca66e0361ad..3e1df3e9c65c8 100644 --- a/pandas/tests/test_config.py +++ b/pandas/tests/test_config.py @@ -3,8 +3,6 @@ import pytest -from pandas.compat import PY2 - import pandas as pd from pandas.core.config import OptionError @@ -209,7 +207,6 @@ def test_set_option_multiple(self): assert self.cf.get_option('b.c') is None assert self.cf.get_option('b.b') == 10.0 - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_validation(self): self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int) self.cf.register_option('b.c', 'hullo', 'doc2', diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index f39d743256f45..04973789b456f 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -9,7 +9,7 @@ import numpy as np # noqa import pytest -from pandas.compat import PY2, PY36, is_platform_windows +from pandas.compat import PY36 from pandas import DataFrame from pandas.util import testing as tm @@ -58,8 +58,6 @@ def test_xarray(df): assert df.to_xarray() is not None -@pytest.mark.skipif(is_platform_windows() and PY2, - reason="Broken on Windows / Py2") def test_oo_optimizable(): # GH 21071 subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"]) diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 7a2680135ea80..71116303c3900 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -8,7 +8,7 @@ from numpy.random import randn import pytest -from pandas import _np_version_under1p13, compat +from pandas import _np_version_under1p13 from pandas.core.api import DataFrame from pandas.core.computation import expressions as expr import pandas.util.testing as tm @@ -57,8 +57,6 @@ def run_arithmetic(self, df, other, assert_func, check_dtype=False, test_flex=True): expr._MIN_ELEMENTS = 0 operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv'] - if not compat.PY3: - operations.append('div') for arith in operations: operator_name = arith @@ -324,31 +322,30 @@ def testit(): def test_bool_ops_raise_on_arithmetic(self): df = DataFrame({'a': np.random.rand(10) > 0.5, 'b': np.random.rand(10) > 0.5}) - names = 'div', 'truediv', 'floordiv', 'pow' - ops = '/', '/', '//', '**' + names = 'truediv', 'floordiv', 'pow' + ops = '/', '//', '**' msg = 'operator %r not implemented for bool dtypes' for op, name in zip(ops, names): - if not compat.PY3 or name != 'div': - f = getattr(operator, name) - err_msg = re.escape(msg % op) + f = getattr(operator, name) + 
err_msg = re.escape(msg % op) - with pytest.raises(NotImplementedError, match=err_msg): - f(df, df) + with pytest.raises(NotImplementedError, match=err_msg): + f(df, df) - with pytest.raises(NotImplementedError, match=err_msg): - f(df.a, df.b) + with pytest.raises(NotImplementedError, match=err_msg): + f(df.a, df.b) - with pytest.raises(NotImplementedError, match=err_msg): - f(df.a, True) + with pytest.raises(NotImplementedError, match=err_msg): + f(df.a, True) - with pytest.raises(NotImplementedError, match=err_msg): - f(False, df.a) + with pytest.raises(NotImplementedError, match=err_msg): + f(False, df.a) - with pytest.raises(NotImplementedError, match=err_msg): - f(False, df) + with pytest.raises(NotImplementedError, match=err_msg): + f(False, df) - with pytest.raises(NotImplementedError, match=err_msg): - f(df, True) + with pytest.raises(NotImplementedError, match=err_msg): + f(df, True) def test_bool_ops_warn_on_arithmetic(self): n = 10 diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index d1893b7efbc41..25cd206769a82 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -7,7 +7,6 @@ import numpy as np import pytest -from pandas.compat import PY2 from pandas.compat.numpy import _np_version_under1p13 import pandas.util._test_decorators as td @@ -729,7 +728,6 @@ def test_numeric_values(self): # Test complex assert nanops._ensure_numeric(1 + 2j) == 1 + 2j - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_ndarray(self): # Test numeric ndarray values = np.array([1, 2, 3]) diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index fa8fbddd59118..8f7caec2481ba 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -7,8 +7,6 @@ from numpy import nan import pytest -from pandas.compat import PY2 - from pandas import ( DataFrame, MultiIndex, Series, compat, concat, merge, to_datetime) from pandas.core import common as com @@ -413,7 +411,6 @@ def test_mixed_integer_from_list(self): expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object) tm.assert_numpy_array_equal(result, expected) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_unsortable(self): # GH 13714 arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object) @@ -421,14 +418,8 @@ def test_unsortable(self): r"datetime\.datetime' and 'int'|'int' and 'datetime\.datetime" r"')|" r"unorderable types: int\(\) > datetime\.datetime\(\)") - if compat.PY2: - # RuntimeWarning: tp_compare didn't return -1 or -2 for exception - with warnings.catch_warnings(): - with pytest.raises(TypeError, match=msg): - safe_sort(arr) - else: - with pytest.raises(TypeError, match=msg): - safe_sort(arr) + with pytest.raises(TypeError, match=msg): + safe_sort(arr) def test_exceptions(self): with pytest.raises(TypeError, diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 40a83f90c8dfd..a2242fbea6013 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -10,7 +10,7 @@ import pytest import pandas.compat as compat -from pandas.compat import PY2, PY3, range, u +from pandas.compat import range, u from pandas import DataFrame, Index, MultiIndex, Series, concat, isna, notna import pandas.core.strings as strings @@ -120,8 +120,8 @@ def any_string_method(request): # subset of the full set from pandas/conftest.py _any_allowed_skipna_inferred_dtype = [ ('string', ['a', np.nan, 'c']), - ('unicode' if not PY3 else 'string', [u('a'), np.nan, u('c')]), - ('bytes' if PY3 else 
'string', [b'a', np.nan, b'c']), + ('string', [u('a'), np.nan, u('c')]), + ('bytes', [b'a', np.nan, b'c']), ('empty', [np.nan, np.nan, np.nan]), ('empty', []), ('mixed-integer', ['a', np.nan, 2]) @@ -136,9 +136,8 @@ def any_allowed_skipna_inferred_dtype(request): The covered (inferred) types are: * 'string' - * 'unicode' (if PY2) * 'empty' - * 'bytes' (if PY3) + * 'bytes' * 'mixed' * 'mixed-integer' @@ -1021,11 +1020,8 @@ def test_replace_callable(self): tm.assert_series_equal(result, exp) # test with wrong number of arguments, raising an error - if compat.PY2: - p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?' - else: - p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ ' - r'(?(3)required )positional arguments?') + p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ ' + r'(?(3)required )positional arguments?') repl = lambda: None with pytest.raises(TypeError, match=p_err): @@ -1892,11 +1888,8 @@ def test_empty_str_methods(self): tm.assert_series_equal(empty_str, empty.str.capitalize()) tm.assert_series_equal(empty_str, empty.str.swapcase()) tm.assert_series_equal(empty_str, empty.str.normalize('NFC')) - if compat.PY3: - table = str.maketrans('a', 'b') - else: - import string - table = string.maketrans('a', 'b') + + table = str.maketrans('a', 'b') tm.assert_series_equal(empty_str, empty.str.translate(table)) def test_empty_str_methods_to_frame(self): @@ -2302,28 +2295,14 @@ def _check(result, expected): for klass in [Series, Index]: s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg']) - if not compat.PY3: - import string - table = string.maketrans('abc', 'cde') - else: - table = str.maketrans('abc', 'cde') + table = str.maketrans('abc', 'cde') result = s.str.translate(table) expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg']) _check(result, expected) - # use of deletechars is python 2 only - if not compat.PY3: + msg = "deletechars is not a valid argument" + with pytest.raises(ValueError, match=msg): result = s.str.translate(table, deletechars='fg') - expected = klass(['cdede', 'cdee', 'eddd', 'ede']) - _check(result, expected) - - result = s.str.translate(None, deletechars='fg') - expected = klass(['abcde', 'abcc', 'cddd', 'cde']) - _check(result, expected) - else: - msg = "deletechars is not a valid argument" - with pytest.raises(ValueError, match=msg): - result = s.str.translate(table, deletechars='fg') # Series with non-string values s = Series(['a', 'b', 'c', 1.2]) @@ -3323,7 +3302,6 @@ def test_encode_decode(self): tm.assert_series_equal(result, exp) - @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_encode_decode_errors(self): encodeBase = Series([u('a'), u('b'), u('a\x9d')]) @@ -3377,20 +3355,12 @@ def test_normalize(self): def test_index_str_accessor_visibility(self): from pandas.core.strings import StringMethods - if not compat.PY3: - cases = [(['a', 'b'], 'string'), (['a', u('b')], 'mixed'), - ([u('a'), u('b')], 'unicode'), - (['a', 'b', 1], 'mixed-integer'), - (['a', 'b', 1.3], 'mixed'), - (['a', 'b', 1.3, 1], 'mixed-integer'), - (['aa', datetime(2011, 1, 1)], 'mixed')] - else: - cases = [(['a', 'b'], 'string'), (['a', u('b')], 'string'), - ([u('a'), u('b')], 'string'), - (['a', 'b', 1], 'mixed-integer'), - (['a', 'b', 1.3], 'mixed'), - (['a', 'b', 1.3, 1], 'mixed-integer'), - (['aa', datetime(2011, 1, 1)], 'mixed')] + cases = [(['a', 'b'], 'string'), (['a', u('b')], 'string'), + ([u('a'), u('b')], 'string'), + (['a', 'b', 1], 'mixed-integer'), + (['a', 'b', 1.3], 'mixed'), + (['a', 'b', 1.3, 1], 'mixed-integer'), + (['aa', 
datetime(2011, 1, 1)], 'mixed')] for values, tp in cases: idx = Index(values) assert isinstance(Series(values).str, StringMethods) @@ -3432,16 +3402,9 @@ def test_str_accessor_no_new_attributes(self): def test_method_on_bytes(self): lhs = Series(np.array(list('abc'), 'S1').astype(object)) rhs = Series(np.array(list('def'), 'S1').astype(object)) - if compat.PY3: - with pytest.raises(TypeError, match="can't concat str to bytes"): - lhs.str.cat(rhs) - else: - result = lhs.str.cat(rhs) - expected = Series(np.array( - ['ad', 'be', 'cf'], 'S2').astype(object)) - tm.assert_series_equal(result, expected) + with pytest.raises(TypeError, match="can't concat str to bytes"): + lhs.str.cat(rhs) - @pytest.mark.skipif(compat.PY2, reason='not in python2') def test_casefold(self): # GH25405 expected = Series(['ss', NA, 'case', 'ssd']) diff --git a/pandas/tests/util/test_move.py b/pandas/tests/util/test_move.py index ef98f2032e6ca..2c90e1401f222 100644 --- a/pandas/tests/util/test_move.py +++ b/pandas/tests/util/test_move.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- -import sys -from uuid import uuid4 - import pytest -from pandas.compat import PY3, intern from pandas.util._move import BadMove, move_into_mutable_buffer, stolenbuf @@ -46,34 +42,3 @@ def test_exactly_one_ref(): # Materialize as byte-array to show that it is mutable. assert bytearray(as_stolen_buf) == b"test" - - -@pytest.mark.skipif(PY3, reason="bytes objects cannot be interned in PY3") -def test_interned(): - salt = uuid4().hex - - def make_string(): - # We need to actually create a new string so that it has refcount - # one. We use a uuid so that we know the string could not already - # be in the intern table. - return "".join(("testing: ", salt)) - - # This should work, the string has one reference on the stack. - move_into_mutable_buffer(make_string()) - refcount = [None] # nonlocal - - def ref_capture(ob): - # Subtract two because those are the references owned by this frame: - # 1. The local variables of this stack frame. - # 2. The python data stack of this stack frame. - refcount[0] = sys.getrefcount(ob) - 2 - return ob - - with pytest.raises(BadMove, match="testing"): - # If we intern the string, it will still have one reference. Now, - # it is in the intern table, so if other people intern the same - # string while the mutable buffer holds the first string they will - # be the same instance. 
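# --- illustrative aside (not part of the patch): the Python 3 string APIs used above ---
# With the Python 2 branches gone, the translate and casefold tests lean on the
# built-in str.maketrans (the old string.maketrans helper does not exist on
# Python 3) and on Series.str.casefold.  A minimal sketch reusing the values
# from the translate test above; the output comments are what this branch of
# pandas should produce:
import pandas as pd

s = pd.Series(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
table = str.maketrans('abc', 'cde')
print(s.str.translate(table).tolist())   # ['cdedefg', 'cdee', 'edddfg', 'edefggg']

print(pd.Series(['\u00df', 'Case']).str.casefold().tolist())   # ['ss', 'case']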
- move_into_mutable_buffer(ref_capture(intern(make_string()))) # noqa - - assert refcount[0] == 1 From 4a2ac83727b4836bc099695a24d0241eecea3424 Mon Sep 17 00:00:00 2001 From: Jeremy Schendel Date: Thu, 21 Mar 2019 01:25:33 -0600 Subject: [PATCH 2/2] lint --- pandas/tests/dtypes/test_inference.py | 2 +- pandas/tests/indexes/datetimes/test_misc.py | 2 +- pandas/tests/indexing/test_floats.py | 2 +- pandas/tests/io/formats/test_format.py | 4 ++-- pandas/tests/test_compat.py | 4 ++-- pandas/tests/test_sorting.py | 1 - 6 files changed, 7 insertions(+), 8 deletions(-) diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 41fa273586914..68857d6cc6902 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -30,7 +30,7 @@ import pandas as pd from pandas import ( Categorical, DataFrame, DateOffset, DatetimeIndex, Index, Interval, Period, - Series, Timedelta, TimedeltaIndex, Timestamp, compat, isna) + Series, Timedelta, TimedeltaIndex, Timestamp, isna) from pandas.util import testing as tm diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index 1ea5bc17978ed..fd52a70e29848 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import ( - DatetimeIndex, Index, Timestamp, compat, date_range, datetime, offsets) + DatetimeIndex, Index, Timestamp, date_range, datetime, offsets) import pandas.util.testing as tm diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index c8732f0713b6d..e29ac8a3b197e 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -6,7 +6,7 @@ import pytest from pandas import ( - DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series, compat) + DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series) import pandas.util.testing as tm from pandas.util.testing import assert_almost_equal, assert_series_equal diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index e9fd42c2a7216..e42ba834cb083 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -22,8 +22,8 @@ import pandas.compat as compat from pandas.compat import ( - StringIO, is_platform_32bit, is_platform_windows, lrange, lzip, range, - u, zip) + StringIO, is_platform_32bit, is_platform_windows, lrange, lzip, range, u, + zip) import pandas as pd from pandas import ( diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py index ec98fad525ce2..3cf7ec3800936 100644 --- a/pandas/tests/test_compat.py +++ b/pandas/tests/test_compat.py @@ -8,8 +8,8 @@ import pytest from pandas.compat import ( - builtins, filter, get_range_parameters, iteritems, iterkeys, - itervalues, lfilter, lmap, lrange, lzip, map, next, range, re_type, zip) + builtins, filter, get_range_parameters, iteritems, iterkeys, itervalues, + lfilter, lmap, lrange, lzip, map, next, range, re_type, zip) class TestBuiltinIterators(object): diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index 8f7caec2481ba..c753b5531fde7 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -1,7 +1,6 @@ from collections import defaultdict from datetime import datetime from itertools import product -import warnings import numpy as np from numpy import nan
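# --- illustrative aside (not part of the patch): the pytest.raises(match=...) idiom ---
# Many of the skips removed in this series carried the reason
# "pytest.raises match regex fails": the `match` argument is applied as a
# regular expression search against the string of the raised exception, and
# the tests typically wrap literal messages in re.escape.  A small,
# self-contained sketch of the pattern now used unconditionally (the divide
# helper is made up for illustration; the error message is CPython's standard
# ZeroDivisionError text):
import re
import pytest

def divide(a, b):
    return a / b

def test_divide_by_zero_message():
    with pytest.raises(ZeroDivisionError, match=re.escape("division by zero")):
        divide(1, 0)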