diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py
index 47baf4e76f8c3..e81b919dbad2d 100644
--- a/pandas/tests/arithmetic/conftest.py
+++ b/pandas/tests/arithmetic/conftest.py
@@ -2,6 +2,7 @@
 import pytest
 
 import pandas as pd
+from pandas import Float64Index, Int64Index, RangeIndex, UInt64Index
 import pandas._testing as tm
 
 # ------------------------------------------------------------------
@@ -93,10 +94,10 @@ def zero(request):
 
 @pytest.fixture(
     params=[
-        pd.Float64Index(np.arange(5, dtype="float64")),
-        pd.Int64Index(np.arange(5, dtype="int64")),
-        pd.UInt64Index(np.arange(5, dtype="uint64")),
-        pd.RangeIndex(5),
+        Float64Index(np.arange(5, dtype="float64")),
+        Int64Index(np.arange(5, dtype="int64")),
+        UInt64Index(np.arange(5, dtype="uint64")),
+        RangeIndex(5),
     ],
     ids=lambda x: type(x).__name__,
 )
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index b0b8f1345e4d3..35ffb0a246e25 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -17,6 +17,7 @@
 
 import pandas as pd
 from pandas import (
+    DateOffset,
     DatetimeIndex,
     NaT,
     Period,
@@ -166,8 +167,8 @@ class TestDatetime64SeriesComparison:
                 [NaT, NaT, Timedelta("3 days")],
             ),
             (
-                [pd.Period("2011-01", freq="M"), NaT, pd.Period("2011-03", freq="M")],
-                [NaT, NaT, pd.Period("2011-03", freq="M")],
+                [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
+                [NaT, NaT, Period("2011-03", freq="M")],
             ),
         ],
     )
@@ -1078,7 +1079,7 @@ def test_dt64arr_add_timestamp_raises(self, box_with_array):
             3.14,
             np.array([2.0, 3.0]),
             # GH#13078 datetime +/- Period is invalid
-            pd.Period("2011-01-01", freq="D"),
+            Period("2011-01-01", freq="D"),
             # https://github.com/pandas-dev/pandas/issues/10329
             time(1, 2, 3),
         ],
@@ -1288,7 +1289,7 @@ def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
             ("microseconds", 5),
         ]
         for i, kwd in enumerate(relative_kwargs):
-            off = pd.DateOffset(**dict([kwd]))
+            off = DateOffset(**dict([kwd]))
 
             expected = DatetimeIndex([x + off for x in vec_items])
             expected = tm.box_expected(expected, box_with_array)
@@ -1298,7 +1299,7 @@ def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
             expected = tm.box_expected(expected, box_with_array)
             tm.assert_equal(expected, vec - off)
 
-            off = pd.DateOffset(**dict(relative_kwargs[: i + 1]))
+            off = DateOffset(**dict(relative_kwargs[: i + 1]))
 
             expected = DatetimeIndex([x + off for x in vec_items])
             expected = tm.box_expected(expected, box_with_array)
@@ -1431,14 +1432,14 @@ def test_dt64arr_add_sub_DateOffset(self, box_with_array):
         # GH#10699
         s = date_range("2000-01-01", "2000-01-31", name="a")
         s = tm.box_expected(s, box_with_array)
-        result = s + pd.DateOffset(years=1)
-        result2 = pd.DateOffset(years=1) + s
+        result = s + DateOffset(years=1)
+        result2 = DateOffset(years=1) + s
         exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
         exp = tm.box_expected(exp, box_with_array)
         tm.assert_equal(result, exp)
         tm.assert_equal(result2, exp)
 
-        result = s - pd.DateOffset(years=1)
+        result = s - DateOffset(years=1)
         exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
         exp = tm.box_expected(exp, box_with_array)
         tm.assert_equal(result, exp)
@@ -1527,7 +1528,7 @@ def test_dt64arr_add_sub_offset_array(
         [
             (
                 "__add__",
-                pd.DateOffset(months=3, days=10),
+                DateOffset(months=3, days=10),
                 [
                     Timestamp("2014-04-11"),
                     Timestamp("2015-04-11"),
@@ -1538,7 +1539,7 @@
             ),
             (
                 "__add__",
-                pd.DateOffset(months=3),
+                DateOffset(months=3),
                 [
                     Timestamp("2014-04-01"),
                     Timestamp("2015-04-01"),
@@ -1549,7 +1550,7 @@
             ),
             (
                 "__sub__",
-                pd.DateOffset(months=3, days=10),
+                DateOffset(months=3, days=10),
                 [
                     Timestamp("2013-09-21"),
                     Timestamp("2014-09-21"),
@@ -1560,7 +1561,7 @@
             ),
             (
                 "__sub__",
-                pd.DateOffset(months=3),
+                DateOffset(months=3),
                 [
                     Timestamp("2013-10-01"),
                     Timestamp("2014-10-01"),
diff --git a/pandas/tests/arrays/integer/test_construction.py b/pandas/tests/arrays/integer/test_construction.py
index e0a4877da6c7e..15307b6f2190e 100644
--- a/pandas/tests/arrays/integer/test_construction.py
+++ b/pandas/tests/arrays/integer/test_construction.py
@@ -9,7 +9,7 @@
 
 
 def test_uses_pandas_na():
-    a = pd.array([1, None], dtype=pd.Int64Dtype())
+    a = pd.array([1, None], dtype=Int64Dtype())
     assert a[1] is pd.NA
 
 
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index a2a9bb2c4b039..46edde62b510e 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -274,7 +274,7 @@ def test_take(self):
         tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
 
     def test_take_all_empty(self):
-        a = pd.array([0, 0], dtype=pd.SparseDtype("int64"))
+        a = pd.array([0, 0], dtype=SparseDtype("int64"))
         result = a.take([0, 1], allow_fill=True, fill_value=np.nan)
         tm.assert_sp_array_equal(a, result)
 
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 0751c37a7f439..ffd56b9c23bc8 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -355,7 +355,7 @@ def test_astype_object_frame(self, all_data):
 
     def test_astype_str(self, data):
         result = pd.Series(data[:5]).astype(str)
-        expected_dtype = pd.SparseDtype(str, str(data.fill_value))
+        expected_dtype = SparseDtype(str, str(data.fill_value))
         expected = pd.Series([str(x) for x in data[:5]], dtype=expected_dtype)
         self.assert_series_equal(result, expected)
 
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 408024e48a35a..f53378d86d7c6 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2456,7 +2456,7 @@ def test_from_records_sequencelike(self):
 
         # tuples is in the order of the columns
         result = DataFrame.from_records(tuples)
-        tm.assert_index_equal(result.columns, pd.RangeIndex(8))
+        tm.assert_index_equal(result.columns, RangeIndex(8))
 
         # test exclude parameter & we are casting the results here (as we don't
         # have dtype info to recover)
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index 48f87664d5141..fc59df29ef18f 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -53,7 +53,7 @@ def test_shallow_copy_inherits_array_freq(self, index):
     def test_categorical_preserves_tz(self):
         # GH#18664 retain tz when going DTI-->Categorical-->DTI
         # TODO: parametrize over DatetimeIndex/DatetimeArray
-        #  once CategoricalIndex(DTA) works
+        #  once pd.CategoricalIndex(DTA) works
 
         dti = DatetimeIndex(
             [pd.NaT, "2015-01-01", "1999-04-06 15:14:13", "2015-01-01"], tz="US/Eastern"
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index e64cadf7a8069..76f2738948872 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -6,7 +6,7 @@
 from pandas._libs.tslibs import Timestamp
 
 import pandas as pd
-from pandas import Float64Index, Index, Int64Index, Series, UInt64Index
+from pandas import Float64Index, Index, Int64Index, RangeIndex, Series, UInt64Index
 import pandas._testing as tm
 from pandas.tests.indexes.common import Base
 
@@ -171,10 +171,10 @@ def test_constructor(self):
     @pytest.mark.parametrize(
         "index, dtype",
         [
-            (pd.Int64Index, "float64"),
-            (pd.UInt64Index, "categorical"),
-            (pd.Float64Index, "datetime64"),
-            (pd.RangeIndex, "float64"),
+            (Int64Index, "float64"),
+            (UInt64Index, "categorical"),
+            (Float64Index, "datetime64"),
+            (RangeIndex, "float64"),
         ],
     )
     def test_invalid_dtype(self, index, dtype):
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 567d37f318fd1..9b9ece68b887e 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -445,11 +445,11 @@ def test_loc_slice(self):
 
     def test_loc_and_at_with_categorical_index(self):
         # GH 20629
-        s = Series([1, 2, 3], index=pd.CategoricalIndex(["A", "B", "C"]))
+        s = Series([1, 2, 3], index=CategoricalIndex(["A", "B", "C"]))
         assert s.loc["A"] == 1
         assert s.at["A"] == 1
         df = DataFrame(
-            [[1, 2], [3, 4], [5, 6]], index=pd.CategoricalIndex(["A", "B", "C"])
+            [[1, 2], [3, 4], [5, 6]], index=CategoricalIndex(["A", "B", "C"])
         )
         assert df.loc["B", 1] == 4
         assert df.at["B", 1] == 4
diff --git a/pandas/tests/io/excel/test_xlrd.py b/pandas/tests/io/excel/test_xlrd.py
index 1c9c514b20f46..26190edaa4960 100644
--- a/pandas/tests/io/excel/test_xlrd.py
+++ b/pandas/tests/io/excel/test_xlrd.py
@@ -38,6 +38,6 @@ def test_read_xlrd_book(read_ext, frame):
 # TODO: test for openpyxl as well
 def test_excel_table_sheet_by_index(datapath, read_ext):
     path = datapath("io", "data", "excel", f"test1{read_ext}")
-    with pd.ExcelFile(path) as excel:
+    with ExcelFile(path) as excel:
         with pytest.raises(xlrd.XLRDError):
             pd.read_excel(excel, sheet_name="asdf")
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 1e84ba1dbffd9..8c2297699807d 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -55,7 +55,7 @@ def test_ops(self, opname, obj):
         if not isinstance(obj, PeriodIndex):
             expected = getattr(obj.values, opname)()
         else:
-            expected = pd.Period(ordinal=getattr(obj.asi8, opname)(), freq=obj.freq)
+            expected = Period(ordinal=getattr(obj.asi8, opname)(), freq=obj.freq)
 
         if getattr(obj, "tz", None) is not None:
             # We need to de-localize before comparing to the numpy-produced result
@@ -470,19 +470,19 @@ def test_numpy_minmax_datetime64(self):
     def test_minmax_period(self):
 
         # monotonic
-        idx1 = pd.PeriodIndex([NaT, "2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
+        idx1 = PeriodIndex([NaT, "2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
         assert not idx1.is_monotonic
         assert idx1[1:].is_monotonic
 
         # non-monotonic
-        idx2 = pd.PeriodIndex(
+        idx2 = PeriodIndex(
             ["2011-01-01", NaT, "2011-01-03", "2011-01-02", NaT], freq="D"
         )
         assert not idx2.is_monotonic
 
         for idx in [idx1, idx2]:
-            assert idx.min() == pd.Period("2011-01-01", freq="D")
-            assert idx.max() == pd.Period("2011-01-03", freq="D")
+            assert idx.min() == Period("2011-01-01", freq="D")
+            assert idx.max() == Period("2011-01-03", freq="D")
         assert idx1.argmin() == 1
         assert idx2.argmin() == 0
         assert idx1.argmax() == 3
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index d3d33d6fe847e..5d75c22c8b795 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -1285,7 +1285,7 @@ def test_resample_timegrouper():
             expected.index = expected.index._with_freq(None)
         tm.assert_frame_equal(result, expected)
 
-        result = df.groupby(pd.Grouper(freq="M", key="A")).count()
+        result = df.groupby(Grouper(freq="M", key="A")).count()
         tm.assert_frame_equal(result, expected)
 
         df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(len(dates))))
@@ -1299,7 +1299,7 @@ def test_resample_timegrouper():
            expected.index = expected.index._with_freq(None)
         tm.assert_frame_equal(result, expected)
 
-        result = df.groupby(pd.Grouper(freq="M", key="A")).count()
+        result = df.groupby(Grouper(freq="M", key="A")).count()
         tm.assert_frame_equal(result, expected)
 
 
@@ -1319,8 +1319,8 @@ def test_resample_nunique():
         }
     )
     r = df.resample("D")
-    g = df.groupby(pd.Grouper(freq="D"))
-    expected = df.groupby(pd.Grouper(freq="D")).ID.apply(lambda x: x.nunique())
+    g = df.groupby(Grouper(freq="D"))
+    expected = df.groupby(Grouper(freq="D")).ID.apply(lambda x: x.nunique())
     assert expected.name == "ID"
 
     for t in [r, g]:
@@ -1330,7 +1330,7 @@
     result = df.ID.resample("D").nunique()
     tm.assert_series_equal(result, expected)
 
-    result = df.ID.groupby(pd.Grouper(freq="D")).nunique()
+    result = df.ID.groupby(Grouper(freq="D")).nunique()
     tm.assert_series_equal(result, expected)
 
 
@@ -1443,7 +1443,7 @@ def test_groupby_with_dst_time_change():
     ).tz_convert("America/Chicago")
 
     df = DataFrame([1, 2], index=index)
-    result = df.groupby(pd.Grouper(freq="1d")).last()
+    result = df.groupby(Grouper(freq="1d")).last()
    expected_index_values = pd.date_range(
         "2016-11-02", "2016-11-24", freq="d", tz="America/Chicago"
     )
@@ -1587,7 +1587,7 @@ def test_downsample_dst_at_midnight():
     index = index.tz_localize("UTC").tz_convert("America/Havana")
     data = list(range(len(index)))
     dataframe = DataFrame(data, index=index)
-    result = dataframe.groupby(pd.Grouper(freq="1D")).mean()
+    result = dataframe.groupby(Grouper(freq="1D")).mean()
 
     dti = date_range("2018-11-03", periods=3).tz_localize(
         "America/Havana", ambiguous=True
@@ -1709,9 +1709,9 @@ def test_resample_equivalent_offsets(n1, freq1, n2, freq2, k):
     ],
 )
 def test_get_timestamp_range_edges(first, last, freq, exp_first, exp_last):
-    first = pd.Period(first)
+    first = Period(first)
     first = first.to_timestamp(first.freq)
-    last = pd.Period(last)
+    last = Period(last)
     last = last.to_timestamp(last.freq)
 
     exp_first = Timestamp(exp_first, freq=freq)
diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py
index 8bdaad285e3f6..79fc6bae1a9eb 100644
--- a/pandas/tests/resample/test_period_index.py
+++ b/pandas/tests/resample/test_period_index.py
@@ -845,11 +845,11 @@ def test_resample_with_offset(self, start, end, start_freq, end_freq, offset):
         ],
     )
     def test_get_period_range_edges(self, first, last, freq, exp_first, exp_last):
-        first = pd.Period(first)
-        last = pd.Period(last)
+        first = Period(first)
+        last = Period(last)
 
-        exp_first = pd.Period(exp_first, freq=freq)
-        exp_last = pd.Period(exp_last, freq=freq)
+        exp_first = Period(exp_first, freq=freq)
+        exp_last = Period(exp_last, freq=freq)
 
         freq = pd.tseries.frequencies.to_offset(freq)
         result = _get_period_range_edges(first, last, freq)
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index e08876226cbc8..d774417e1851c 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -411,16 +411,14 @@ def test_pivot_no_values(self):
             },
             index=idx,
         )
-        res = df.pivot_table(
-            index=df.index.month, columns=pd.Grouper(key="dt", freq="M")
-        )
+        res = df.pivot_table(index=df.index.month, columns=Grouper(key="dt", freq="M"))
         exp_columns = MultiIndex.from_tuples([("A", pd.Timestamp("2011-01-31"))])
         exp_columns.names = [None, "dt"]
         exp = DataFrame([3.25, 2.0], index=[1, 2], columns=exp_columns)
         tm.assert_frame_equal(res, exp)
 
         res = df.pivot_table(
-            index=pd.Grouper(freq="A"), columns=pd.Grouper(key="dt", freq="M")
+            index=Grouper(freq="A"), columns=Grouper(key="dt", freq="M")
         )
         exp = DataFrame(
             [3], index=pd.DatetimeIndex(["2011-12-31"], freq="A"), columns=exp_columns