diff --git a/ci/code_checks.sh b/ci/code_checks.sh index a9cce9357b531..7c48905135f89 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -38,8 +38,8 @@ function invgrep { } function check_namespace { - local -r CLASS="${1}" - grep -R -l --include "*.py" " ${CLASS}(" pandas/tests | xargs grep -n "pd\.${CLASS}(" + local -r CLASS=${1} + grep -R -l --include "*.py" " ${CLASS}(" pandas/tests | xargs grep -n "pd\.${CLASS}[(\.]" test $? -gt 0 } @@ -146,7 +146,7 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Check for inconsistent use of pandas namespace in tests' ; echo $MSG - for class in "Series" "DataFrame" "Index"; do + for class in "Series" "DataFrame" "Index" "MultiIndex" "Timestamp" "Timedelta" "TimedeltaIndex" "DatetimeIndex" "Categorical"; do check_namespace ${class} RET=$(($RET + $?)) done diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index f9dd4a7445a99..cefd2ae7a9ddb 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -115,7 +115,7 @@ def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture): dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data - other = np.array([0, 1, 2, dta[3], pd.Timedelta(days=1)]) + other = np.array([0, 1, 2, dta[3], Timedelta(days=1)]) result = dta == other expected = np.array([False, False, False, True, False]) tm.assert_numpy_array_equal(result, expected) @@ -139,7 +139,7 @@ def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array): box = box_with_array xbox = box if box not in [pd.Index, pd.array] else np.ndarray - ts = pd.Timestamp.now(tz) + ts = Timestamp.now(tz) ser = Series([ts, pd.NaT]) obj = tm.box_expected(ser, box) @@ -158,12 +158,12 @@ class TestDatetime64SeriesComparison: "pair", [ ( - [pd.Timestamp("2011-01-01"), NaT, pd.Timestamp("2011-01-03")], - [NaT, NaT, pd.Timestamp("2011-01-03")], + [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")], + [NaT, NaT, Timestamp("2011-01-03")], ), ( - [pd.Timedelta("1 days"), NaT, pd.Timedelta("3 days")], - [NaT, NaT, pd.Timedelta("3 days")], + [Timedelta("1 days"), NaT, Timedelta("3 days")], + [NaT, NaT, Timedelta("3 days")], ), ( [pd.Period("2011-01", freq="M"), NaT, pd.Period("2011-03", freq="M")], @@ -281,30 +281,30 @@ def test_timestamp_compare_series(self, left, right): ser = Series(pd.date_range("20010101", periods=10), name="dates") s_nat = ser.copy(deep=True) - ser[0] = pd.Timestamp("nat") - ser[3] = pd.Timestamp("nat") + ser[0] = Timestamp("nat") + ser[3] = Timestamp("nat") left_f = getattr(operator, left) right_f = getattr(operator, right) # No NaT - expected = left_f(ser, pd.Timestamp("20010109")) - result = right_f(pd.Timestamp("20010109"), ser) + expected = left_f(ser, Timestamp("20010109")) + result = right_f(Timestamp("20010109"), ser) tm.assert_series_equal(result, expected) # NaT - expected = left_f(ser, pd.Timestamp("nat")) - result = right_f(pd.Timestamp("nat"), ser) + expected = left_f(ser, Timestamp("nat")) + result = right_f(Timestamp("nat"), ser) tm.assert_series_equal(result, expected) # Compare to Timestamp with series containing NaT - expected = left_f(s_nat, pd.Timestamp("20010109")) - result = right_f(pd.Timestamp("20010109"), s_nat) + expected = left_f(s_nat, Timestamp("20010109")) + result = right_f(Timestamp("20010109"), s_nat) tm.assert_series_equal(result, expected) # Compare to NaT with series containing NaT - expected = left_f(s_nat, pd.Timestamp("nat")) - result = 
right_f(pd.Timestamp("nat"), s_nat) + expected = left_f(s_nat, Timestamp("nat")) + result = right_f(Timestamp("nat"), s_nat) tm.assert_series_equal(result, expected) def test_dt64arr_timestamp_equality(self, box_with_array): @@ -313,7 +313,7 @@ def test_dt64arr_timestamp_equality(self, box_with_array): box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray ) - ser = Series([pd.Timestamp("2000-01-29 01:59:00"), "NaT"]) + ser = Series([Timestamp("2000-01-29 01:59:00"), "NaT"]) ser = tm.box_expected(ser, box_with_array) result = ser != ser @@ -413,10 +413,8 @@ def test_dti_cmp_nat(self, dtype, box_with_array): box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray ) - left = pd.DatetimeIndex( - [pd.Timestamp("2011-01-01"), pd.NaT, pd.Timestamp("2011-01-03")] - ) - right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp("2011-01-03")]) + left = DatetimeIndex([Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-03")]) + right = DatetimeIndex([pd.NaT, pd.NaT, Timestamp("2011-01-03")]) left = tm.box_expected(left, box_with_array) right = tm.box_expected(right, box_with_array) @@ -454,10 +452,10 @@ def test_dti_cmp_nat_behaves_like_float_cmp_nan(self): fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0]) fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0]) - didx1 = pd.DatetimeIndex( + didx1 = DatetimeIndex( ["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"] ) - didx2 = pd.DatetimeIndex( + didx2 = DatetimeIndex( ["2014-02-01", "2014-03-01", pd.NaT, pd.NaT, "2014-06-01", "2014-07-01"] ) darr = np.array( @@ -611,8 +609,8 @@ def test_comparison_tzawareness_compat_scalars(self, op, box_with_array): dz = tm.box_expected(dz, box_with_array) # Check comparisons against scalar Timestamps - ts = pd.Timestamp("2000-03-14 01:59") - ts_tz = pd.Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam") + ts = Timestamp("2000-03-14 01:59") + ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam") assert np.all(dr > ts) msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp" @@ -679,7 +677,7 @@ def test_scalar_comparison_tzawareness( def test_nat_comparison_tzawareness(self, op): # GH#19276 # tzaware DatetimeIndex should not raise when compared to NaT - dti = pd.DatetimeIndex( + dti = DatetimeIndex( ["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"] ) expected = np.array([op == operator.ne] * len(dti)) @@ -885,7 +883,7 @@ def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture): dti = pd.date_range("1994-04-01", periods=9, tz=tz, freq="QS") other = np.timedelta64("NaT") - expected = pd.DatetimeIndex(["NaT"] * 9, tz=tz) + expected = DatetimeIndex(["NaT"] * 9, tz=tz) obj = tm.box_expected(dti, box_with_array) expected = tm.box_expected(expected, box_with_array) @@ -904,7 +902,7 @@ def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array): tz = tz_naive_fixture dti = pd.date_range("2016-01-01", periods=3, tz=tz) - tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"]) + tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"]) tdarr = tdi.values expected = pd.date_range("2015-12-31", "2016-01-02", periods=3, tz=tz) @@ -932,9 +930,9 @@ def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array): @pytest.mark.parametrize( "ts", [ - pd.Timestamp("2013-01-01"), - pd.Timestamp("2013-01-01").to_pydatetime(), - pd.Timestamp("2013-01-01").to_datetime64(), + Timestamp("2013-01-01"), + Timestamp("2013-01-01").to_pydatetime(), + Timestamp("2013-01-01").to_datetime64(), 
], ) def test_dt64arr_sub_dtscalar(self, box_with_array, ts): @@ -942,7 +940,7 @@ def test_dt64arr_sub_dtscalar(self, box_with_array, ts): idx = pd.date_range("2013-01-01", periods=3)._with_freq(None) idx = tm.box_expected(idx, box_with_array) - expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"]) + expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"]) expected = tm.box_expected(expected, box_with_array) result = idx - ts @@ -957,7 +955,7 @@ def test_dt64arr_sub_datetime64_not_ns(self, box_with_array): dti = pd.date_range("20130101", periods=3)._with_freq(None) dtarr = tm.box_expected(dti, box_with_array) - expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"]) + expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"]) expected = tm.box_expected(expected, box_with_array) result = dtarr - dt64 @@ -981,7 +979,7 @@ def test_dt64arr_sub_timestamp(self, box_with_array): def test_dt64arr_sub_NaT(self, box_with_array): # GH#18808 - dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp("19900315")]) + dti = DatetimeIndex([pd.NaT, Timestamp("19900315")]) ser = tm.box_expected(dti, box_with_array) result = ser - pd.NaT @@ -1102,7 +1100,7 @@ def test_dt64arr_add_sub_parr( self, dti_freq, pi_freq, box_with_array, box_with_array2 ): # GH#20049 subtracting PeriodIndex should raise TypeError - dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq) + dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq) pi = dti.to_period(pi_freq) dtarr = tm.box_expected(dti, box_with_array) @@ -1579,7 +1577,7 @@ def test_dti_add_sub_nonzero_mth_offset( mth = getattr(date, op) result = mth(offset) - expected = pd.DatetimeIndex(exp, tz=tz) + expected = DatetimeIndex(exp, tz=tz) expected = tm.box_expected(expected, box_with_array, False) tm.assert_equal(result, expected) @@ -1603,8 +1601,8 @@ def test_dt64_overflow_masking(self, box_with_array): def test_dt64_series_arith_overflow(self): # GH#12534, fixed by GH#19024 - dt = pd.Timestamp("1700-01-31") - td = pd.Timedelta("20000 Days") + dt = Timestamp("1700-01-31") + td = Timedelta("20000 Days") dti = pd.date_range("1949-09-30", freq="100Y", periods=4) ser = Series(dti) msg = "Overflow in int64 addition" @@ -1634,8 +1632,8 @@ def test_dt64_series_arith_overflow(self): tm.assert_series_equal(res, -expected) def test_datetimeindex_sub_timestamp_overflow(self): - dtimax = pd.to_datetime(["now", pd.Timestamp.max]) - dtimin = pd.to_datetime(["now", pd.Timestamp.min]) + dtimax = pd.to_datetime(["now", Timestamp.max]) + dtimin = pd.to_datetime(["now", Timestamp.min]) tsneg = Timestamp("1950-01-01") ts_neg_variants = [ @@ -1657,12 +1655,12 @@ def test_datetimeindex_sub_timestamp_overflow(self): with pytest.raises(OverflowError, match=msg): dtimax - variant - expected = pd.Timestamp.max.value - tspos.value + expected = Timestamp.max.value - tspos.value for variant in ts_pos_variants: res = dtimax - variant assert res[1].value == expected - expected = pd.Timestamp.min.value - tsneg.value + expected = Timestamp.min.value - tsneg.value for variant in ts_neg_variants: res = dtimin - variant assert res[1].value == expected @@ -1673,18 +1671,18 @@ def test_datetimeindex_sub_timestamp_overflow(self): def test_datetimeindex_sub_datetimeindex_overflow(self): # GH#22492, GH#22508 - dtimax = pd.to_datetime(["now", pd.Timestamp.max]) - dtimin = pd.to_datetime(["now", pd.Timestamp.min]) + dtimax = pd.to_datetime(["now", Timestamp.max]) + dtimin = pd.to_datetime(["now", Timestamp.min]) ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"]) ts_pos = 
pd.to_datetime(["1980-01-01", "1980-01-01"]) # General tests - expected = pd.Timestamp.max.value - ts_pos[1].value + expected = Timestamp.max.value - ts_pos[1].value result = dtimax - ts_pos assert result[1].value == expected - expected = pd.Timestamp.min.value - ts_neg[1].value + expected = Timestamp.min.value - ts_neg[1].value result = dtimin - ts_neg assert result[1].value == expected msg = "Overflow in int64 addition" @@ -1695,13 +1693,13 @@ def test_datetimeindex_sub_datetimeindex_overflow(self): dtimin - ts_pos # Edge cases - tmin = pd.to_datetime([pd.Timestamp.min]) - t1 = tmin + pd.Timedelta.max + pd.Timedelta("1us") + tmin = pd.to_datetime([Timestamp.min]) + t1 = tmin + Timedelta.max + Timedelta("1us") with pytest.raises(OverflowError, match=msg): t1 - tmin - tmax = pd.to_datetime([pd.Timestamp.max]) - t2 = tmax + pd.Timedelta.min - pd.Timedelta("1us") + tmax = pd.to_datetime([Timestamp.max]) + t2 = tmax + Timedelta.min - Timedelta("1us") with pytest.raises(OverflowError, match=msg): tmax - t2 @@ -1727,17 +1725,17 @@ def test_operators_datetimelike(self): # ## datetime64 ### dt1 = Series( [ - pd.Timestamp("20111230"), - pd.Timestamp("20120101"), - pd.Timestamp("20120103"), + Timestamp("20111230"), + Timestamp("20120101"), + Timestamp("20120103"), ] ) dt1.iloc[2] = np.nan dt2 = Series( [ - pd.Timestamp("20111231"), - pd.Timestamp("20120102"), - pd.Timestamp("20120104"), + Timestamp("20111231"), + Timestamp("20120102"), + Timestamp("20120104"), ] ) dt1 - dt2 @@ -1815,8 +1813,8 @@ def check(get_ser, test_ser): def test_sub_single_tz(self): # GH#12290 - s1 = Series([pd.Timestamp("2016-02-10", tz="America/Sao_Paulo")]) - s2 = Series([pd.Timestamp("2016-02-08", tz="America/Sao_Paulo")]) + s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")]) + s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")]) result = s1 - s2 expected = Series([Timedelta("2days")]) tm.assert_series_equal(result, expected) @@ -1829,7 +1827,7 @@ def test_dt64tz_series_sub_dtitz(self): # (with same tz) raises, fixed by #19024 dti = pd.date_range("1999-09-30", periods=10, tz="US/Pacific") ser = Series(dti) - expected = Series(pd.TimedeltaIndex(["0days"] * 10)) + expected = Series(TimedeltaIndex(["0days"] * 10)) res = dti - ser tm.assert_series_equal(res, expected) @@ -1928,7 +1926,7 @@ def test_dt64_mul_div_numeric_invalid(self, one, dt64_series): def test_dt64_series_add_intlike(self, tz_naive_fixture, op): # GH#19123 tz = tz_naive_fixture - dti = pd.DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz) + dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz) ser = Series(dti) other = Series([20, 30, 40], dtype="uint8") @@ -2060,7 +2058,7 @@ def test_dti_add_intarray_non_tick(self, int_holder, freq): @pytest.mark.parametrize("int_holder", [np.array, pd.Index]) def test_dti_add_intarray_no_freq(self, int_holder): # GH#19959 - dti = pd.DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"]) + dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"]) other = int_holder([9, 4, -1]) msg = "|".join( ["cannot subtract DatetimeArray from", "Addition/subtraction of integers"] @@ -2450,10 +2448,10 @@ def test_dti_addsub_object_arraylike( dti = pd.date_range("2017-01-01", periods=2, tz=tz) dtarr = tm.box_expected(dti, box_with_array) - other = other_box([pd.offsets.MonthEnd(), pd.Timedelta(days=4)]) + other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)]) xbox = get_upcast_box(box_with_array, other) - expected = pd.DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture) + 
expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture) expected = tm.box_expected(expected, xbox) warn = PerformanceWarning @@ -2464,7 +2462,7 @@ def test_dti_addsub_object_arraylike( result = dtarr + other tm.assert_equal(result, expected) - expected = pd.DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture) + expected = DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture) expected = tm.box_expected(expected, xbox) with tm.assert_produces_warning(warn): diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 1418aec015b92..836b1dcddf0dd 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -135,7 +135,7 @@ def test_mul_td64arr(self, left, box_cls): right = np.array([1, 2, 3], dtype="m8[s]") right = box_cls(right) - expected = pd.TimedeltaIndex(["10s", "40s", "90s"]) + expected = TimedeltaIndex(["10s", "40s", "90s"]) if isinstance(left, Series) or box_cls is Series: expected = Series(expected) @@ -155,7 +155,7 @@ def test_div_td64arr(self, left, box_cls): right = np.array([10, 40, 90], dtype="m8[s]") right = box_cls(right) - expected = pd.TimedeltaIndex(["1s", "2s", "3s"]) + expected = TimedeltaIndex(["1s", "2s", "3s"]) if isinstance(left, Series) or box_cls is Series: expected = Series(expected) @@ -189,7 +189,7 @@ def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box_with_array): # GH#19333 box = box_with_array index = numeric_idx - expected = pd.TimedeltaIndex([pd.Timedelta(days=n) for n in range(len(index))]) + expected = TimedeltaIndex([Timedelta(days=n) for n in range(len(index))]) index = tm.box_expected(index, box) expected = tm.box_expected(expected, box) @@ -244,10 +244,10 @@ def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box_with_array @pytest.mark.parametrize( "other", [ - pd.Timedelta(hours=31), - pd.Timedelta(hours=31).to_pytimedelta(), - pd.Timedelta(hours=31).to_timedelta64(), - pd.Timedelta(hours=31).to_timedelta64().astype("m8[h]"), + Timedelta(hours=31), + Timedelta(hours=31).to_pytimedelta(), + Timedelta(hours=31).to_timedelta64(), + Timedelta(hours=31).to_timedelta64().astype("m8[h]"), np.timedelta64("NaT"), np.timedelta64("NaT", "D"), pd.offsets.Minute(3), diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py index ddd14af0918de..a31c2e6d8c258 100644 --- a/pandas/tests/arithmetic/test_object.py +++ b/pandas/tests/arithmetic/test_object.py @@ -196,8 +196,8 @@ def test_mixed_timezone_series_ops_object(self): # GH#13043 ser = Series( [ - pd.Timestamp("2015-01-01", tz="US/Eastern"), - pd.Timestamp("2015-01-01", tz="Asia/Tokyo"), + Timestamp("2015-01-01", tz="US/Eastern"), + Timestamp("2015-01-01", tz="Asia/Tokyo"), ], name="xxx", ) @@ -205,8 +205,8 @@ def test_mixed_timezone_series_ops_object(self): exp = Series( [ - pd.Timestamp("2015-01-02", tz="US/Eastern"), - pd.Timestamp("2015-01-02", tz="Asia/Tokyo"), + Timestamp("2015-01-02", tz="US/Eastern"), + Timestamp("2015-01-02", tz="Asia/Tokyo"), ], name="xxx", ) @@ -216,8 +216,8 @@ def test_mixed_timezone_series_ops_object(self): # object series & object series ser2 = Series( [ - pd.Timestamp("2015-01-03", tz="US/Eastern"), - pd.Timestamp("2015-01-05", tz="Asia/Tokyo"), + Timestamp("2015-01-03", tz="US/Eastern"), + Timestamp("2015-01-05", tz="Asia/Tokyo"), ], name="xxx", ) @@ -326,7 +326,7 @@ def test_rsub_object(self): "foo" - index with pytest.raises(TypeError, match=msg): - np.array([True, pd.Timestamp.now()]) - 
index + np.array([True, Timestamp.now()]) - index class MyIndex(pd.Index): diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index f02259a1c7e62..f9fcee889ec96 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -10,7 +10,7 @@ from pandas.errors import PerformanceWarning import pandas as pd -from pandas import PeriodIndex, Series, TimedeltaIndex, period_range +from pandas import PeriodIndex, Series, Timedelta, TimedeltaIndex, period_range import pandas._testing as tm from pandas.core import ops from pandas.core.arrays import TimedeltaArray @@ -41,9 +41,7 @@ def test_compare_zerodim(self, box_with_array): expected = tm.box_expected(expected, xbox) tm.assert_equal(result, expected) - @pytest.mark.parametrize( - "scalar", ["foo", pd.Timestamp.now(), pd.Timedelta(days=4)] - ) + @pytest.mark.parametrize("scalar", ["foo", Timestamp.now(), Timedelta(days=4)]) def test_compare_invalid_scalar(self, box_with_array, scalar): # comparison with scalar that cannot be interpreted as a Period pi = pd.period_range("2000", periods=4) @@ -698,9 +696,9 @@ def test_parr_add_sub_float_raises(self, op, other, box_with_array): "other", [ # datetime scalars - pd.Timestamp.now(), - pd.Timestamp.now().to_pydatetime(), - pd.Timestamp.now().to_datetime64(), + Timestamp.now(), + Timestamp.now().to_pydatetime(), + Timestamp.now().to_datetime64(), # datetime-like arrays pd.date_range("2016-01-01", periods=3, freq="H"), pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"), @@ -733,7 +731,7 @@ def test_parr_add_sub_invalid(self, other, box_with_array): def test_pi_add_sub_td64_array_non_tick_raises(self): rng = pd.period_range("1/1/2000", freq="Q", periods=3) - tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"]) + tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"]) tdarr = tdi.values msg = r"Cannot add or subtract timedelta64\[ns\] dtype from period\[Q-DEC\]" @@ -752,7 +750,7 @@ def test_pi_add_sub_td64_array_tick(self): # PeriodIndex + Timedelta-like is allowed only with # tick-like frequencies rng = pd.period_range("1/1/2000", freq="90D", periods=3) - tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"]) + tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"]) tdarr = tdi.values expected = pd.period_range("12/31/1999", freq="90D", periods=3) @@ -1227,7 +1225,7 @@ def test_parr_add_sub_object_array(self): pi = pd.period_range("2000-12-31", periods=3, freq="D") parr = pi.array - other = np.array([pd.Timedelta(days=1), pd.offsets.Day(2), 3]) + other = np.array([Timedelta(days=1), pd.offsets.Day(2), 3]) with tm.assert_produces_warning(PerformanceWarning): result = parr + other @@ -1258,10 +1256,10 @@ def test_ops_series_timedelta(self): name="xxx", ) - result = ser + pd.Timedelta("1 days") + result = ser + Timedelta("1 days") tm.assert_series_equal(result, expected) - result = pd.Timedelta("1 days") + ser + result = Timedelta("1 days") + ser tm.assert_series_equal(result, expected) result = ser + pd.tseries.offsets.Day() @@ -1492,7 +1490,7 @@ def test_pi_sub_period(self): result = np.subtract(pd.Period("2012-01", freq="M"), idx) tm.assert_index_equal(result, exp) - exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") + exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") result = idx - pd.Period("NaT", freq="M") tm.assert_index_equal(result, exp) assert result.freq == exp.freq @@ -1506,7 +1504,7 @@ def test_pi_sub_pdnat(self): idx = PeriodIndex( ["2011-01", "2011-02", "NaT", "2011-04"], 
freq="M", name="idx" ) - exp = pd.TimedeltaIndex([pd.NaT] * 4, name="idx") + exp = TimedeltaIndex([pd.NaT] * 4, name="idx") tm.assert_index_equal(pd.NaT - idx, exp) tm.assert_index_equal(idx - pd.NaT, exp) @@ -1525,7 +1523,7 @@ def test_pi_sub_period_nat(self): exp = pd.Index([12 * off, pd.NaT, 10 * off, 9 * off], name="idx") tm.assert_index_equal(result, exp) - exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") + exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") tm.assert_index_equal(idx - pd.Period("NaT", freq="M"), exp) tm.assert_index_equal(pd.Period("NaT", freq="M") - idx, exp) diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 5f556718ea0d3..31c7a17fd9ef5 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -119,7 +119,7 @@ def test_td64arr_cmp_arraylike_invalid(self, other): def test_td64arr_cmp_mixed_invalid(self): rng = timedelta_range("1 days", periods=5)._data - other = np.array([0, 1, 2, rng[3], pd.Timestamp.now()]) + other = np.array([0, 1, 2, rng[3], Timestamp.now()]) result = rng == other expected = np.array([False, False, False, True, False]) tm.assert_numpy_array_equal(result, expected) @@ -143,10 +143,8 @@ class TestTimedelta64ArrayComparisons: @pytest.mark.parametrize("dtype", [None, object]) def test_comp_nat(self, dtype): - left = pd.TimedeltaIndex( - [pd.Timedelta("1 days"), pd.NaT, pd.Timedelta("3 days")] - ) - right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta("3 days")]) + left = TimedeltaIndex([Timedelta("1 days"), pd.NaT, Timedelta("3 days")]) + right = TimedeltaIndex([pd.NaT, pd.NaT, Timedelta("3 days")]) lhs, rhs = left, right if dtype is object: @@ -173,7 +171,7 @@ def test_comp_nat(self, dtype): tm.assert_numpy_array_equal(pd.NaT > lhs, expected) def test_comparisons_nat(self): - tdidx1 = pd.TimedeltaIndex( + tdidx1 = TimedeltaIndex( [ "1 day", pd.NaT, @@ -183,7 +181,7 @@ def test_comparisons_nat(self): "5 day 00:00:03", ] ) - tdidx2 = pd.TimedeltaIndex( + tdidx2 = TimedeltaIndex( ["2 day", "2 day", pd.NaT, pd.NaT, "1 day 00:00:02", "5 days 00:00:03"] ) tdarr = np.array( @@ -1030,7 +1028,7 @@ def test_tdi_sub_dt64_array(self, box_with_array): dti = pd.date_range("2016-01-01", periods=3) tdi = dti - dti.shift(1) dtarr = dti.values - expected = pd.DatetimeIndex(dtarr) - tdi + expected = DatetimeIndex(dtarr) - tdi tdi = tm.box_expected(tdi, box_with_array) expected = tm.box_expected(expected, box_with_array) @@ -1047,7 +1045,7 @@ def test_tdi_add_dt64_array(self, box_with_array): dti = pd.date_range("2016-01-01", periods=3) tdi = dti - dti.shift(1) dtarr = dti.values - expected = pd.DatetimeIndex(dtarr) + tdi + expected = DatetimeIndex(dtarr) + tdi tdi = tm.box_expected(tdi, box_with_array) expected = tm.box_expected(expected, box_with_array) @@ -1062,7 +1060,7 @@ def test_td64arr_add_datetime64_nat(self, box_with_array): other = np.datetime64("NaT") tdi = timedelta_range("1 day", periods=3) - expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"]) + expected = DatetimeIndex(["NaT", "NaT", "NaT"]) tdser = tm.box_expected(tdi, box_with_array) expected = tm.box_expected(expected, box_with_array) @@ -1246,9 +1244,9 @@ def test_td64arr_add_sub_tdi(self, box_with_array, names): def test_td64arr_add_sub_td64_nat(self, box_with_array): # GH#23320 special handling for timedelta64("NaT") box = box_with_array - tdi = pd.TimedeltaIndex([NaT, Timedelta("1s")]) + tdi = TimedeltaIndex([NaT, Timedelta("1s")]) other = 
np.timedelta64("NaT") - expected = pd.TimedeltaIndex(["NaT"] * 2) + expected = TimedeltaIndex(["NaT"] * 2) obj = tm.box_expected(tdi, box) expected = tm.box_expected(expected, box) @@ -1472,14 +1470,14 @@ def test_td64arr_add_sub_object_array(self, box_with_array): tdarr = tm.box_expected(tdi, box) other = np.array( - [pd.Timedelta(days=1), pd.offsets.Day(2), pd.Timestamp("2000-01-04")] + [Timedelta(days=1), pd.offsets.Day(2), Timestamp("2000-01-04")] ) with tm.assert_produces_warning(PerformanceWarning): result = tdarr + other expected = pd.Index( - [pd.Timedelta(days=2), pd.Timedelta(days=4), pd.Timestamp("2000-01-07")] + [Timedelta(days=2), Timedelta(days=4), Timestamp("2000-01-07")] ) expected = tm.box_expected(expected, xbox) tm.assert_equal(result, expected) @@ -1492,9 +1490,7 @@ def test_td64arr_add_sub_object_array(self, box_with_array): with tm.assert_produces_warning(PerformanceWarning): result = other - tdarr - expected = pd.Index( - [pd.Timedelta(0), pd.Timedelta(0), pd.Timestamp("2000-01-01")] - ) + expected = pd.Index([Timedelta(0), Timedelta(0), Timestamp("2000-01-01")]) expected = tm.box_expected(expected, xbox) tm.assert_equal(result, expected) @@ -2188,9 +2184,9 @@ def test_td64arr_pow_invalid(self, scalar_td, box_with_array): def test_add_timestamp_to_timedelta(): # GH: 35897 - timestamp = pd.Timestamp.now() + timestamp = Timestamp.now() result = timestamp + pd.timedelta_range("0s", "1s", periods=31) - expected = pd.DatetimeIndex( + expected = DatetimeIndex( [ timestamp + ( diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py index 471e11cff9fd7..657511116c306 100644 --- a/pandas/tests/arrays/categorical/test_constructors.py +++ b/pandas/tests/arrays/categorical/test_constructors.py @@ -54,7 +54,7 @@ def test_constructor_empty(self): def test_constructor_empty_boolean(self): # see gh-22702 - cat = pd.Categorical([], categories=[True, False]) + cat = Categorical([], categories=[True, False]) categories = sorted(cat.categories.tolist()) assert categories == [False, True] @@ -412,7 +412,7 @@ def test_constructor_str_unknown(self): def test_constructor_np_strs(self): # GH#31499 Hastable.map_locations needs to work on np.str_ objects - cat = pd.Categorical(["1", "0", "1"], [np.str_("0"), np.str_("1")]) + cat = Categorical(["1", "0", "1"], [np.str_("0"), np.str_("1")]) assert all(isinstance(x, np.str_) for x in cat.categories) def test_constructor_from_categorical_with_dtype(self): @@ -637,48 +637,48 @@ def test_constructor_imaginary(self): def test_constructor_string_and_tuples(self): # GH 21416 - c = pd.Categorical(np.array(["c", ("a", "b"), ("b", "a"), "c"], dtype=object)) + c = Categorical(np.array(["c", ("a", "b"), ("b", "a"), "c"], dtype=object)) expected_index = Index([("a", "b"), ("b", "a"), "c"]) assert c.categories.equals(expected_index) def test_interval(self): idx = pd.interval_range(0, 10, periods=10) - cat = pd.Categorical(idx, categories=idx) + cat = Categorical(idx, categories=idx) expected_codes = np.arange(10, dtype="int8") tm.assert_numpy_array_equal(cat.codes, expected_codes) tm.assert_index_equal(cat.categories, idx) # infer categories - cat = pd.Categorical(idx) + cat = Categorical(idx) tm.assert_numpy_array_equal(cat.codes, expected_codes) tm.assert_index_equal(cat.categories, idx) # list values - cat = pd.Categorical(list(idx)) + cat = Categorical(list(idx)) tm.assert_numpy_array_equal(cat.codes, expected_codes) tm.assert_index_equal(cat.categories, idx) # list values, categories - cat 
= pd.Categorical(list(idx), categories=list(idx)) + cat = Categorical(list(idx), categories=list(idx)) tm.assert_numpy_array_equal(cat.codes, expected_codes) tm.assert_index_equal(cat.categories, idx) # shuffled values = idx.take([1, 2, 0]) - cat = pd.Categorical(values, categories=idx) + cat = Categorical(values, categories=idx) tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype="int8")) tm.assert_index_equal(cat.categories, idx) # extra values = pd.interval_range(8, 11, periods=3) - cat = pd.Categorical(values, categories=idx) + cat = Categorical(values, categories=idx) expected_codes = np.array([8, 9, -1], dtype="int8") tm.assert_numpy_array_equal(cat.codes, expected_codes) tm.assert_index_equal(cat.categories, idx) # overlapping idx = pd.IntervalIndex([pd.Interval(0, 2), pd.Interval(0, 1)]) - cat = pd.Categorical(idx, categories=idx) + cat = Categorical(idx, categories=idx) expected_codes = np.array([0, 1], dtype="int8") tm.assert_numpy_array_equal(cat.codes, expected_codes) tm.assert_index_equal(cat.categories, idx) diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py index c589b72fa2895..bf0b5289b5df1 100644 --- a/pandas/tests/arrays/categorical/test_indexing.py +++ b/pandas/tests/arrays/categorical/test_indexing.py @@ -1,7 +1,6 @@ import numpy as np import pytest -import pandas as pd from pandas import Categorical, CategoricalIndex, Index, PeriodIndex, Series import pandas._testing as tm import pandas.core.common as com @@ -40,28 +39,28 @@ def test_setitem(self): @pytest.mark.parametrize( "other", - [pd.Categorical(["b", "a"]), pd.Categorical(["b", "a"], categories=["b", "a"])], + [Categorical(["b", "a"]), Categorical(["b", "a"], categories=["b", "a"])], ) def test_setitem_same_but_unordered(self, other): # GH-24142 - target = pd.Categorical(["a", "b"], categories=["a", "b"]) + target = Categorical(["a", "b"], categories=["a", "b"]) mask = np.array([True, False]) target[mask] = other[mask] - expected = pd.Categorical(["b", "b"], categories=["a", "b"]) + expected = Categorical(["b", "b"], categories=["a", "b"]) tm.assert_categorical_equal(target, expected) @pytest.mark.parametrize( "other", [ - pd.Categorical(["b", "a"], categories=["b", "a", "c"]), - pd.Categorical(["b", "a"], categories=["a", "b", "c"]), - pd.Categorical(["a", "a"], categories=["a"]), - pd.Categorical(["b", "b"], categories=["b"]), + Categorical(["b", "a"], categories=["b", "a", "c"]), + Categorical(["b", "a"], categories=["a", "b", "c"]), + Categorical(["a", "a"], categories=["a"]), + Categorical(["b", "b"], categories=["b"]), ], ) def test_setitem_different_unordered_raises(self, other): # GH-24142 - target = pd.Categorical(["a", "b"], categories=["a", "b"]) + target = Categorical(["a", "b"], categories=["a", "b"]) mask = np.array([True, False]) msg = "Cannot set a Categorical with another, without identical categories" with pytest.raises(ValueError, match=msg): @@ -70,14 +69,14 @@ def test_setitem_different_unordered_raises(self, other): @pytest.mark.parametrize( "other", [ - pd.Categorical(["b", "a"]), - pd.Categorical(["b", "a"], categories=["b", "a"], ordered=True), - pd.Categorical(["b", "a"], categories=["a", "b", "c"], ordered=True), + Categorical(["b", "a"]), + Categorical(["b", "a"], categories=["b", "a"], ordered=True), + Categorical(["b", "a"], categories=["a", "b", "c"], ordered=True), ], ) def test_setitem_same_ordered_raises(self, other): # Gh-24142 - target = pd.Categorical(["a", "b"], categories=["a", "b"], ordered=True) + 
target = Categorical(["a", "b"], categories=["a", "b"], ordered=True) mask = np.array([True, False]) msg = "Cannot set a Categorical with another, without identical categories" with pytest.raises(ValueError, match=msg): @@ -85,7 +84,7 @@ def test_setitem_same_ordered_raises(self, other): def test_setitem_tuple(self): # GH#20439 - cat = pd.Categorical([(0, 1), (0, 2), (0, 1)]) + cat = Categorical([(0, 1), (0, 2), (0, 1)]) # This should not raise cat[1] = cat[0] @@ -216,15 +215,15 @@ def test_get_indexer_non_unique(self, idx_values, key_values, key_class): tm.assert_numpy_array_equal(exp_miss, res_miss) def test_where_unobserved_nan(self): - ser = Series(pd.Categorical(["a", "b"])) + ser = Series(Categorical(["a", "b"])) result = ser.where([True, False]) - expected = Series(pd.Categorical(["a", None], categories=["a", "b"])) + expected = Series(Categorical(["a", None], categories=["a", "b"])) tm.assert_series_equal(result, expected) # all NA - ser = Series(pd.Categorical(["a", "b"])) + ser = Series(Categorical(["a", "b"])) result = ser.where([False, False]) - expected = Series(pd.Categorical([None, None], categories=["a", "b"])) + expected = Series(Categorical([None, None], categories=["a", "b"])) tm.assert_series_equal(result, expected) def test_where_unobserved_categories(self): diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py index ed70417523491..51dc66c18a3e6 100644 --- a/pandas/tests/arrays/categorical/test_operators.py +++ b/pandas/tests/arrays/categorical/test_operators.py @@ -180,7 +180,7 @@ def test_comparison_with_unknown_scalars(self): tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True])) def test_comparison_with_tuple(self): - cat = pd.Categorical(np.array(["foo", (0, 1), 3, (0, 1)], dtype=object)) + cat = Categorical(np.array(["foo", (0, 1), 3, (0, 1)], dtype=object)) result = cat == "foo" expected = np.array([True, False, False, False], dtype=bool) @@ -337,8 +337,8 @@ def test_compare_different_lengths(self): def test_compare_unordered_different_order(self): # https://github.com/pandas-dev/pandas/issues/16603#issuecomment- # 349290078 - a = pd.Categorical(["a"], categories=["a", "b"]) - b = pd.Categorical(["b"], categories=["b", "a"]) + a = Categorical(["a"], categories=["a", "b"]) + b = Categorical(["b"], categories=["b", "a"]) assert not a.equals(b) def test_numeric_like_ops(self): @@ -398,7 +398,7 @@ def test_numeric_like_ops(self): def test_contains(self): # GH21508 - c = pd.Categorical(list("aabbca"), categories=list("cab")) + c = Categorical(list("aabbca"), categories=list("cab")) assert "b" in c assert "z" not in c @@ -410,7 +410,7 @@ def test_contains(self): assert 0 not in c assert 1 not in c - c = pd.Categorical(list("aabbca") + [np.nan], categories=list("cab")) + c = Categorical(list("aabbca") + [np.nan], categories=list("cab")) assert np.nan in c @pytest.mark.parametrize( diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index 47ebfe311d9ea..c0567209ff91b 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -81,7 +81,7 @@ def test_from_sequence_dtype(self): @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"]) def test_astype_int(self, dtype): - arr = TimedeltaArray._from_sequence([pd.Timedelta("1H"), pd.Timedelta("2H")]) + arr = TimedeltaArray._from_sequence([Timedelta("1H"), Timedelta("2H")]) result = arr.astype(dtype) if np.dtype(dtype).kind == "u": @@ -95,15 
+95,15 @@ def test_astype_int(self, dtype): def test_setitem_clears_freq(self): a = TimedeltaArray(pd.timedelta_range("1H", periods=2, freq="H")) - a[0] = pd.Timedelta("1H") + a[0] = Timedelta("1H") assert a.freq is None @pytest.mark.parametrize( "obj", [ - pd.Timedelta(seconds=1), - pd.Timedelta(seconds=1).to_timedelta64(), - pd.Timedelta(seconds=1).to_pytimedelta(), + Timedelta(seconds=1), + Timedelta(seconds=1).to_timedelta64(), + Timedelta(seconds=1).to_pytimedelta(), ], ) def test_setitem_objects(self, obj): @@ -112,7 +112,7 @@ def test_setitem_objects(self, obj): arr = TimedeltaArray(tdi, freq=tdi.freq) arr[0] = obj - assert arr[0] == pd.Timedelta(seconds=1) + assert arr[0] == Timedelta(seconds=1) @pytest.mark.parametrize( "other", @@ -206,11 +206,11 @@ def test_min_max(self): arr = TimedeltaArray._from_sequence(["3H", "3H", "NaT", "2H", "5H", "4H"]) result = arr.min() - expected = pd.Timedelta("2H") + expected = Timedelta("2H") assert result == expected result = arr.max() - expected = pd.Timedelta("5H") + expected = Timedelta("5H") assert result == expected result = arr.min(skipna=False) @@ -224,12 +224,12 @@ def test_sum(self): arr = tdi.array result = arr.sum(skipna=True) - expected = pd.Timedelta(hours=17) - assert isinstance(result, pd.Timedelta) + expected = Timedelta(hours=17) + assert isinstance(result, Timedelta) assert result == expected result = tdi.sum(skipna=True) - assert isinstance(result, pd.Timedelta) + assert isinstance(result, Timedelta) assert result == expected result = arr.sum(skipna=False) @@ -245,11 +245,11 @@ def test_sum(self): assert result is pd.NaT result = arr.sum(min_count=1) - assert isinstance(result, pd.Timedelta) + assert isinstance(result, Timedelta) assert result == expected result = tdi.sum(min_count=1) - assert isinstance(result, pd.Timedelta) + assert isinstance(result, Timedelta) assert result == expected def test_npsum(self): @@ -258,12 +258,12 @@ def test_npsum(self): arr = tdi.array result = np.sum(tdi) - expected = pd.Timedelta(hours=17) - assert isinstance(result, pd.Timedelta) + expected = Timedelta(hours=17) + assert isinstance(result, Timedelta) assert result == expected result = np.sum(arr) - assert isinstance(result, pd.Timedelta) + assert isinstance(result, Timedelta) assert result == expected def test_sum_2d_skipna_false(self): @@ -276,15 +276,15 @@ def test_sum_2d_skipna_false(self): assert result is pd.NaT result = tda.sum(axis=0, skipna=False) - expected = pd.TimedeltaIndex([pd.Timedelta(seconds=12), pd.NaT])._values + expected = pd.TimedeltaIndex([Timedelta(seconds=12), pd.NaT])._values tm.assert_timedelta_array_equal(result, expected) result = tda.sum(axis=1, skipna=False) expected = pd.TimedeltaIndex( [ - pd.Timedelta(seconds=1), - pd.Timedelta(seconds=5), - pd.Timedelta(seconds=9), + Timedelta(seconds=1), + Timedelta(seconds=5), + Timedelta(seconds=9), pd.NaT, ] )._values @@ -294,7 +294,7 @@ def test_sum_2d_skipna_false(self): @pytest.mark.parametrize( "add", [ - pd.Timedelta(0), + Timedelta(0), pd.Timestamp.now(), pd.Timestamp.now("UTC"), pd.Timestamp.now("Asia/Tokyo"), @@ -305,17 +305,17 @@ def test_std(self, add): arr = tdi.array result = arr.std(skipna=True) - expected = pd.Timedelta(hours=2) - assert isinstance(result, pd.Timedelta) + expected = Timedelta(hours=2) + assert isinstance(result, Timedelta) assert result == expected result = tdi.std(skipna=True) - assert isinstance(result, pd.Timedelta) + assert isinstance(result, Timedelta) assert result == expected if getattr(arr, "tz", None) is None: result = 
nanops.nanstd(np.asarray(arr), skipna=True) - assert isinstance(result, pd.Timedelta) + assert isinstance(result, Timedelta) assert result == expected result = arr.std(skipna=False) @@ -333,12 +333,12 @@ def test_median(self): arr = tdi.array result = arr.median(skipna=True) - expected = pd.Timedelta(hours=2) - assert isinstance(result, pd.Timedelta) + expected = Timedelta(hours=2) + assert isinstance(result, Timedelta) assert result == expected result = tdi.median(skipna=True) - assert isinstance(result, pd.Timedelta) + assert isinstance(result, Timedelta) assert result == expected result = arr.median(skipna=False) @@ -352,7 +352,7 @@ def test_mean(self): arr = tdi._data # manually verified result - expected = pd.Timedelta(arr.dropna()._ndarray.mean()) + expected = Timedelta(arr.dropna()._ndarray.mean()) result = arr.mean() assert result == expected @@ -374,7 +374,7 @@ def test_mean_2d(self): tm.assert_timedelta_array_equal(result, expected) result = tda.mean(axis=1) - expected = tda[:, 0] + pd.Timedelta(hours=12) + expected = tda[:, 0] + Timedelta(hours=12) tm.assert_timedelta_array_equal(result, expected) result = tda.mean(axis=None) diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py index 538cf2c78b50e..24e88824088be 100644 --- a/pandas/tests/base/test_conversion.py +++ b/pandas/tests/base/test_conversion.py @@ -306,8 +306,8 @@ def test_array_multiindex_raises(): ), np.array( [ - pd.Timestamp("2000-01-01", tz="US/Central"), - pd.Timestamp("2000-01-02", tz="US/Central"), + Timestamp("2000-01-01", tz="US/Central"), + Timestamp("2000-01-02", tz="US/Central"), ] ), ), @@ -360,7 +360,7 @@ def test_to_numpy_dtype(as_series): # preserve tz by default result = obj.to_numpy() expected = np.array( - [pd.Timestamp("2000", tz=tz), pd.Timestamp("2001", tz=tz)], dtype=object + [Timestamp("2000", tz=tz), Timestamp("2001", tz=tz)], dtype=object ) tm.assert_numpy_array_equal(result, expected) @@ -377,9 +377,9 @@ def test_to_numpy_dtype(as_series): [ ([1, 2, None], "float64", 0, [1.0, 2.0, 0.0]), ( - [pd.Timestamp("2000"), pd.Timestamp("2000"), pd.NaT], + [Timestamp("2000"), Timestamp("2000"), pd.NaT], None, - pd.Timestamp("2000"), + Timestamp("2000"), [np.datetime64("2000-01-01T00:00:00.000000000")] * 3, ), ], diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index afae6ab7b9c07..1c82d6f9a26ff 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -821,10 +821,10 @@ def test_infer_dtype_datetime64_with_na(self, na_value): np.array( [np.datetime64("2011-01-02"), np.timedelta64("nat")], dtype=object ), - np.array([np.datetime64("2011-01-01"), pd.Timestamp("2011-01-02")]), - np.array([pd.Timestamp("2011-01-02"), np.datetime64("2011-01-01")]), - np.array([np.nan, pd.Timestamp("2011-01-02"), 1.1]), - np.array([np.nan, "2011-01-01", pd.Timestamp("2011-01-02")]), + np.array([np.datetime64("2011-01-01"), Timestamp("2011-01-02")]), + np.array([Timestamp("2011-01-02"), np.datetime64("2011-01-01")]), + np.array([np.nan, Timestamp("2011-01-02"), 1.1]), + np.array([np.nan, "2011-01-01", Timestamp("2011-01-02")]), np.array([np.datetime64("nat"), np.timedelta64(1, "D")], dtype=object), np.array([np.timedelta64(1, "D"), np.datetime64("nat")], dtype=object), ], @@ -833,7 +833,7 @@ def test_infer_datetimelike_dtype_mixed(self, arr): assert lib.infer_dtype(arr, skipna=False) == "mixed" def test_infer_dtype_mixed_integer(self): - arr = np.array([np.nan, pd.Timestamp("2011-01-02"), 1]) + arr = 
np.array([np.nan, Timestamp("2011-01-02"), 1]) assert lib.infer_dtype(arr, skipna=True) == "mixed-integer" @pytest.mark.parametrize( @@ -841,7 +841,7 @@ def test_infer_dtype_mixed_integer(self): [ np.array([Timestamp("2011-01-01"), Timestamp("2011-01-02")]), np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)]), - np.array([datetime(2011, 1, 1), pd.Timestamp("2011-01-02")]), + np.array([datetime(2011, 1, 1), Timestamp("2011-01-02")]), ], ) def test_infer_dtype_datetime(self, arr): @@ -849,7 +849,7 @@ def test_infer_dtype_datetime(self, arr): @pytest.mark.parametrize("na_value", [pd.NaT, np.nan]) @pytest.mark.parametrize( - "time_stamp", [pd.Timestamp("2011-01-01"), datetime(2011, 1, 1)] + "time_stamp", [Timestamp("2011-01-01"), datetime(2011, 1, 1)] ) def test_infer_dtype_datetime_with_na(self, na_value, time_stamp): # starts with nan @@ -1062,8 +1062,8 @@ def test_is_datetimelike_array_all_nan_nat_like(self): assert lib.is_datetime_with_singletz_array( np.array( [ - pd.Timestamp("20130101", tz="US/Eastern"), - pd.Timestamp("20130102", tz="US/Eastern"), + Timestamp("20130101", tz="US/Eastern"), + Timestamp("20130102", tz="US/Eastern"), ], dtype=object, ) @@ -1071,8 +1071,8 @@ def test_is_datetimelike_array_all_nan_nat_like(self): assert not lib.is_datetime_with_singletz_array( np.array( [ - pd.Timestamp("20130101", tz="US/Eastern"), - pd.Timestamp("20130102", tz="CET"), + Timestamp("20130101", tz="US/Eastern"), + Timestamp("20130102", tz="CET"), ], dtype=object, ) @@ -1115,8 +1115,8 @@ def test_date(self): @pytest.mark.parametrize( "values", [ - [date(2020, 1, 1), pd.Timestamp("2020-01-01")], - [pd.Timestamp("2020-01-01"), date(2020, 1, 1)], + [date(2020, 1, 1), Timestamp("2020-01-01")], + [Timestamp("2020-01-01"), date(2020, 1, 1)], [date(2020, 1, 1), pd.NaT], [pd.NaT, date(2020, 1, 1)], ], @@ -1194,7 +1194,7 @@ def test_to_object_array_width(self): def test_is_period(self): assert lib.is_period(pd.Period("2011-01", freq="M")) assert not lib.is_period(pd.PeriodIndex(["2011-01"], freq="M")) - assert not lib.is_period(pd.Timestamp("2011-01")) + assert not lib.is_period(Timestamp("2011-01")) assert not lib.is_period(1) assert not lib.is_period(np.nan) diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index e7b5d2598d8e7..c02185dd82043 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -225,7 +225,7 @@ def test_complex(self, value, expected): tm.assert_numpy_array_equal(result, expected) def test_datetime_other_units(self): - idx = pd.DatetimeIndex(["2011-01-01", "NaT", "2011-01-02"]) + idx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-02"]) exp = np.array([False, True, False]) tm.assert_numpy_array_equal(isna(idx), exp) tm.assert_numpy_array_equal(notna(idx), ~exp) @@ -256,7 +256,7 @@ def test_datetime_other_units(self): tm.assert_series_equal(notna(s), ~exp) def test_timedelta_other_units(self): - idx = pd.TimedeltaIndex(["1 days", "NaT", "2 days"]) + idx = TimedeltaIndex(["1 days", "NaT", "2 days"]) exp = np.array([False, True, False]) tm.assert_numpy_array_equal(isna(idx), exp) tm.assert_numpy_array_equal(notna(idx), ~exp) diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index 7d03dadb20dd9..e8d82b525c9f4 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -199,7 +199,7 @@ def test_cast_category_to_extension_dtype(self, expected): ) def test_consistent_casting(self, dtype, expected): # GH 28448 - 
result = pd.Categorical("2015-01-01").astype(dtype) + result = Categorical("2015-01-01").astype(dtype) assert result == expected diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py index 5744a893cd9d6..03498b278f890 100644 --- a/pandas/tests/frame/apply/test_frame_apply.py +++ b/pandas/tests/frame/apply/test_frame_apply.py @@ -659,10 +659,10 @@ def test_applymap_box(self): # ufunc will not be boxed. Same test cases as the test_map_box df = DataFrame( { - "a": [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")], + "a": [Timestamp("2011-01-01"), Timestamp("2011-01-02")], "b": [ - pd.Timestamp("2011-01-01", tz="US/Eastern"), - pd.Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), ], "c": [pd.Timedelta("1 days"), pd.Timedelta("2 days")], "d": [ @@ -734,7 +734,7 @@ def apply_list(row): def test_apply_noreduction_tzaware_object(self): # https://github.com/pandas-dev/pandas/issues/31505 df = DataFrame( - {"foo": [pd.Timestamp("2020", tz="UTC")]}, dtype="datetime64[ns, UTC]" + {"foo": [Timestamp("2020", tz="UTC")]}, dtype="datetime64[ns, UTC]" ) result = df.apply(lambda x: x) tm.assert_frame_equal(result, df) @@ -844,8 +844,8 @@ def test_with_dictlike_columns(self): tm.assert_series_equal(result, expected) df["tm"] = [ - pd.Timestamp("2017-05-01 00:00:00"), - pd.Timestamp("2017-05-02 00:00:00"), + Timestamp("2017-05-01 00:00:00"), + Timestamp("2017-05-02 00:00:00"), ] result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1) tm.assert_series_equal(result, expected) @@ -876,8 +876,8 @@ def test_with_dictlike_columns_with_infer(self): tm.assert_frame_equal(result, expected) df["tm"] = [ - pd.Timestamp("2017-05-01 00:00:00"), - pd.Timestamp("2017-05-02 00:00:00"), + Timestamp("2017-05-01 00:00:00"), + Timestamp("2017-05-02 00:00:00"), ] result = df.apply( lambda x: {"s": x["a"] + x["b"]}, axis=1, result_type="expand" @@ -920,8 +920,8 @@ def test_infer_output_shape_columns(self): "number": [1.0, 2.0], "string": ["foo", "bar"], "datetime": [ - pd.Timestamp("2017-11-29 03:30:00"), - pd.Timestamp("2017-11-29 03:45:00"), + Timestamp("2017-11-29 03:30:00"), + Timestamp("2017-11-29 03:45:00"), ], } ) @@ -957,10 +957,10 @@ def test_infer_output_shape_listlike_columns(self): df = DataFrame( { "a": [ - pd.Timestamp("2010-02-01"), - pd.Timestamp("2010-02-04"), - pd.Timestamp("2010-02-05"), - pd.Timestamp("2010-02-06"), + Timestamp("2010-02-01"), + Timestamp("2010-02-04"), + Timestamp("2010-02-05"), + Timestamp("2010-02-06"), ], "b": [9, 5, 4, 3], "c": [5, 3, 4, 2], @@ -1176,7 +1176,7 @@ def test_agg_multiple_mixed_no_warning(self): "A": [1, 6], "B": [1.0, 6.0], "C": ["bar", "foobarbaz"], - "D": [pd.Timestamp("2013-01-01"), pd.NaT], + "D": [Timestamp("2013-01-01"), pd.NaT], }, index=["min", "sum"], ) @@ -1297,12 +1297,12 @@ def test_nuiscance_columns(self): ) result = df.agg("min") - expected = Series([1, 1.0, "bar", pd.Timestamp("20130101")], index=df.columns) + expected = Series([1, 1.0, "bar", Timestamp("20130101")], index=df.columns) tm.assert_series_equal(result, expected) result = df.agg(["min"]) expected = DataFrame( - [[1, 1.0, "bar", pd.Timestamp("20130101")]], + [[1, 1.0, "bar", Timestamp("20130101")]], index=["min"], columns=df.columns, ) @@ -1505,9 +1505,9 @@ def test_apply_datetime_tz_issue(self): # GH 29052 timestamps = [ - pd.Timestamp("2019-03-15 12:34:31.909000+0000", tz="UTC"), - pd.Timestamp("2019-03-15 12:34:34.359000+0000", tz="UTC"), - 
pd.Timestamp("2019-03-15 12:34:34.660000+0000", tz="UTC"), + Timestamp("2019-03-15 12:34:31.909000+0000", tz="UTC"), + Timestamp("2019-03-15 12:34:34.359000+0000", tz="UTC"), + Timestamp("2019-03-15 12:34:34.660000+0000", tz="UTC"), ] df = DataFrame(data=[0, 1, 2], index=timestamps) result = df.apply(lambda x: x.name, axis=1) diff --git a/pandas/tests/frame/indexing/test_categorical.py b/pandas/tests/frame/indexing/test_categorical.py index e37d00c540974..d3c907f4ce30f 100644 --- a/pandas/tests/frame/indexing/test_categorical.py +++ b/pandas/tests/frame/indexing/test_categorical.py @@ -355,7 +355,7 @@ def test_assigning_ops(self): def test_setitem_single_row_categorical(self): # GH 25495 df = DataFrame({"Alpha": ["a"], "Numeric": [0]}) - categories = pd.Categorical(df["Alpha"], categories=["a", "b", "c"]) + categories = Categorical(df["Alpha"], categories=["a", "b", "c"]) df.loc[:, "Alpha"] = categories result = df["Alpha"] diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index d7be4d071ad74..58f0e5bc1ad39 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1456,11 +1456,11 @@ def test_loc_duplicates(self): # insert a duplicate element to the index trange = pd.date_range( - start=pd.Timestamp(year=2017, month=1, day=1), - end=pd.Timestamp(year=2017, month=1, day=5), + start=Timestamp(year=2017, month=1, day=1), + end=Timestamp(year=2017, month=1, day=5), ) - trange = trange.insert(loc=5, item=pd.Timestamp(year=2017, month=1, day=5)) + trange = trange.insert(loc=5, item=Timestamp(year=2017, month=1, day=5)) df = DataFrame(0, index=trange, columns=["A", "B"]) bool_idx = np.array([False, False, False, False, False, True]) @@ -1530,12 +1530,12 @@ def test_setitem_with_unaligned_tz_aware_datetime_column(self): def test_loc_setitem_datetime_coercion(self): # gh-1048 - df = DataFrame({"c": [pd.Timestamp("2010-10-01")] * 3}) + df = DataFrame({"c": [Timestamp("2010-10-01")] * 3}) df.loc[0:1, "c"] = np.datetime64("2008-08-08") - assert pd.Timestamp("2008-08-08") == df.loc[0, "c"] - assert pd.Timestamp("2008-08-08") == df.loc[1, "c"] + assert Timestamp("2008-08-08") == df.loc[0, "c"] + assert Timestamp("2008-08-08") == df.loc[1, "c"] df.loc[2, "c"] = date(2005, 5, 5) - assert pd.Timestamp("2005-05-05") == df.loc[2, "c"] + assert Timestamp("2005-05-05") == df.loc[2, "c"] def test_loc_setitem_datetimelike_with_inference(self): # GH 7592 @@ -1812,27 +1812,27 @@ def test_object_casting_indexing_wraps_datetimelike(): ) ser = df.loc[0] - assert isinstance(ser.values[1], pd.Timestamp) + assert isinstance(ser.values[1], Timestamp) assert isinstance(ser.values[2], pd.Timedelta) ser = df.iloc[0] - assert isinstance(ser.values[1], pd.Timestamp) + assert isinstance(ser.values[1], Timestamp) assert isinstance(ser.values[2], pd.Timedelta) ser = df.xs(0, axis=0) - assert isinstance(ser.values[1], pd.Timestamp) + assert isinstance(ser.values[1], Timestamp) assert isinstance(ser.values[2], pd.Timedelta) mgr = df._mgr mgr._rebuild_blknos_and_blklocs() arr = mgr.fast_xs(0) - assert isinstance(arr[1], pd.Timestamp) + assert isinstance(arr[1], Timestamp) assert isinstance(arr[2], pd.Timedelta) blk = mgr.blocks[mgr.blknos[1]] assert blk.dtype == "M8[ns]" # we got the right block val = blk.iget((0, 0)) - assert isinstance(val, pd.Timestamp) + assert isinstance(val, Timestamp) blk = mgr.blocks[mgr.blknos[2]] assert blk.dtype == "m8[ns]" # we got the right block diff --git 
a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py index c08a77d00a96d..38b5c150630fe 100644 --- a/pandas/tests/frame/methods/test_append.py +++ b/pandas/tests/frame/methods/test_append.py @@ -189,9 +189,9 @@ def test_append_dtypes(self): def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp): # GH 30238 tz = tz_naive_fixture - df = DataFrame([pd.Timestamp(timestamp, tz=tz)]) + df = DataFrame([Timestamp(timestamp, tz=tz)]) result = df.append(df.iloc[0]).iloc[-1] - expected = Series(pd.Timestamp(timestamp, tz=tz), name=0) + expected = Series(Timestamp(timestamp, tz=tz), name=0) tm.assert_series_equal(result, expected) @pytest.mark.parametrize( diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py index 0358bc3c04539..a90781cf43c16 100644 --- a/pandas/tests/frame/methods/test_describe.py +++ b/pandas/tests/frame/methods/test_describe.py @@ -277,12 +277,12 @@ def test_datetime_is_numeric_includes_datetime(self): { "a": [ 3, - pd.Timestamp("2012-01-02"), - pd.Timestamp("2012-01-01"), - pd.Timestamp("2012-01-01T12:00:00"), - pd.Timestamp("2012-01-02"), - pd.Timestamp("2012-01-02T12:00:00"), - pd.Timestamp("2012-01-03"), + Timestamp("2012-01-02"), + Timestamp("2012-01-01"), + Timestamp("2012-01-01T12:00:00"), + Timestamp("2012-01-02"), + Timestamp("2012-01-02T12:00:00"), + Timestamp("2012-01-03"), np.nan, ], "b": [3, 2, 1, 1.5, 2, 2.5, 3, 1], diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py index 09c10861e87c2..eb5bc31f3aa8f 100644 --- a/pandas/tests/frame/methods/test_drop.py +++ b/pandas/tests/frame/methods/test_drop.py @@ -19,7 +19,7 @@ ) def test_drop_raise_exception_if_labels_not_in_level(msg, labels, level): # GH 8594 - mi = pd.MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) + mi = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) s = pd.Series([10, 20, 30], index=mi) df = DataFrame([10, 20, 30], index=mi) @@ -32,7 +32,7 @@ def test_drop_raise_exception_if_labels_not_in_level(msg, labels, level): @pytest.mark.parametrize("labels,level", [(4, "a"), (7, "b")]) def test_drop_errors_ignore(labels, level): # GH 8594 - mi = pd.MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) + mi = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) s = pd.Series([10, 20, 30], index=mi) df = DataFrame([10, 20, 30], index=mi) @@ -313,7 +313,7 @@ def test_drop_multiindex_other_level_nan(self): expected = DataFrame( [2, 1], columns=["D"], - index=pd.MultiIndex.from_tuples( + index=MultiIndex.from_tuples( [("one", 0.0, "b"), ("one", np.nan, "a")], names=["A", "B", "C"] ), ) diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index 5cdd65b8cf6e2..13e00c97d6f71 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -288,14 +288,14 @@ def test_quantile_box(self): df = DataFrame( { "A": [ - pd.Timestamp("2011-01-01"), - pd.Timestamp("2011-01-02"), - pd.Timestamp("2011-01-03"), + Timestamp("2011-01-01"), + Timestamp("2011-01-02"), + Timestamp("2011-01-03"), ], "B": [ - pd.Timestamp("2011-01-01", tz="US/Eastern"), - pd.Timestamp("2011-01-02", tz="US/Eastern"), - pd.Timestamp("2011-01-03", tz="US/Eastern"), + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-03", tz="US/Eastern"), ], "C": [ pd.Timedelta("1 days"), @@ -309,8 +309,8 @@ def 
test_quantile_box(self): exp = Series( [ - pd.Timestamp("2011-01-02"), - pd.Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-02"), + Timestamp("2011-01-02", tz="US/Eastern"), pd.Timedelta("2 days"), ], name=0.5, @@ -322,8 +322,8 @@ def test_quantile_box(self): exp = DataFrame( [ [ - pd.Timestamp("2011-01-02"), - pd.Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-02"), + Timestamp("2011-01-02", tz="US/Eastern"), pd.Timedelta("2 days"), ] ], @@ -336,28 +336,28 @@ def test_quantile_box(self): df = DataFrame( { "A": [ - pd.Timestamp("2011-01-01"), + Timestamp("2011-01-01"), pd.NaT, - pd.Timestamp("2011-01-02"), - pd.Timestamp("2011-01-03"), + Timestamp("2011-01-02"), + Timestamp("2011-01-03"), ], "a": [ - pd.Timestamp("2011-01-01"), - pd.Timestamp("2011-01-02"), + Timestamp("2011-01-01"), + Timestamp("2011-01-02"), pd.NaT, - pd.Timestamp("2011-01-03"), + Timestamp("2011-01-03"), ], "B": [ - pd.Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-01", tz="US/Eastern"), pd.NaT, - pd.Timestamp("2011-01-02", tz="US/Eastern"), - pd.Timestamp("2011-01-03", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-03", tz="US/Eastern"), ], "b": [ - pd.Timestamp("2011-01-01", tz="US/Eastern"), - pd.Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), pd.NaT, - pd.Timestamp("2011-01-03", tz="US/Eastern"), + Timestamp("2011-01-03", tz="US/Eastern"), ], "C": [ pd.Timedelta("1 days"), @@ -378,10 +378,10 @@ def test_quantile_box(self): res = df.quantile(0.5, numeric_only=False) exp = Series( [ - pd.Timestamp("2011-01-02"), - pd.Timestamp("2011-01-02"), - pd.Timestamp("2011-01-02", tz="US/Eastern"), - pd.Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-02"), + Timestamp("2011-01-02"), + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), pd.Timedelta("2 days"), pd.Timedelta("2 days"), ], @@ -394,10 +394,10 @@ def test_quantile_box(self): exp = DataFrame( [ [ - pd.Timestamp("2011-01-02"), - pd.Timestamp("2011-01-02"), - pd.Timestamp("2011-01-02", tz="US/Eastern"), - pd.Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-02"), + Timestamp("2011-01-02"), + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), pd.Timedelta("2 days"), pd.Timedelta("2 days"), ] @@ -457,21 +457,21 @@ def test_quantile_nat(self): df = DataFrame( { "a": [ - pd.Timestamp("2012-01-01"), - pd.Timestamp("2012-01-02"), - pd.Timestamp("2012-01-03"), + Timestamp("2012-01-01"), + Timestamp("2012-01-02"), + Timestamp("2012-01-03"), ], "b": [pd.NaT, pd.NaT, pd.NaT], } ) res = df.quantile(0.5, numeric_only=False) - exp = Series([pd.Timestamp("2012-01-02"), pd.NaT], index=["a", "b"], name=0.5) + exp = Series([Timestamp("2012-01-02"), pd.NaT], index=["a", "b"], name=0.5) tm.assert_series_equal(res, exp) res = df.quantile([0.5], numeric_only=False) exp = DataFrame( - [[pd.Timestamp("2012-01-02"), pd.NaT]], index=[0.5], columns=["a", "b"] + [[Timestamp("2012-01-02"), pd.NaT]], index=[0.5], columns=["a", "b"] ) tm.assert_frame_equal(res, exp) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 2c909ab2f8227..baa310ddd6f09 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1245,13 +1245,13 @@ def test_replace_period(self): def test_replace_datetime(self): d = { "fname": { - "out_augmented_AUG_2011.json": 
pd.Timestamp("2011-08"), - "out_augmented_JAN_2011.json": pd.Timestamp("2011-01"), - "out_augmented_MAY_2012.json": pd.Timestamp("2012-05"), - "out_augmented_SUBSIDY_WEEK.json": pd.Timestamp("2011-04"), - "out_augmented_AUG_2012.json": pd.Timestamp("2012-08"), - "out_augmented_MAY_2011.json": pd.Timestamp("2011-05"), - "out_augmented_SEP_2013.json": pd.Timestamp("2013-09"), + "out_augmented_AUG_2011.json": Timestamp("2011-08"), + "out_augmented_JAN_2011.json": Timestamp("2011-01"), + "out_augmented_MAY_2012.json": Timestamp("2012-05"), + "out_augmented_SUBSIDY_WEEK.json": Timestamp("2011-04"), + "out_augmented_AUG_2012.json": Timestamp("2012-08"), + "out_augmented_MAY_2011.json": Timestamp("2011-05"), + "out_augmented_SEP_2013.json": Timestamp("2013-09"), } } @@ -1462,7 +1462,7 @@ def test_replace_commutative(self, df, to_replace, exp): @pytest.mark.parametrize( "replacer", [ - pd.Timestamp("20170827"), + Timestamp("20170827"), np.int8(1), np.int16(1), np.float32(1), diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index 6aebc23d1c016..92c9f7564a670 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -526,8 +526,8 @@ def test_reset_index_with_drop( {"a": [pd.NaT, pd.NaT], "b": [1, 2], "x": [11, 12]}, ), ( - [(pd.NaT, 1), (pd.Timestamp("2020-01-01"), 2)], - {"a": [pd.NaT, pd.Timestamp("2020-01-01")], "b": [1, 2], "x": [11, 12]}, + [(pd.NaT, 1), (Timestamp("2020-01-01"), 2)], + {"a": [pd.NaT, Timestamp("2020-01-01")], "b": [1, 2], "x": [11, 12]}, ), ( [(pd.NaT, 1), (pd.Timedelta(123, "d"), 2)], @@ -593,7 +593,7 @@ def test_reset_index_dtypes_on_empty_frame_with_multiindex(array, dtype): def test_reset_index_empty_frame_with_datetime64_multiindex(): # https://github.com/pandas-dev/pandas/issues/35606 idx = MultiIndex( - levels=[[pd.Timestamp("2020-07-20 00:00:00")], [3, 4]], + levels=[[Timestamp("2020-07-20 00:00:00")], [3, 4]], codes=[[], []], names=["a", "b"], ) diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index 16d451a12efc0..de847c12723b2 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -851,7 +851,7 @@ def test_sort_index_multiindex_sparse_column(self): i: pd.array([0.0, 0.0, 0.0, 0.0], dtype=pd.SparseDtype("float64", 0.0)) for i in range(0, 4) }, - index=pd.MultiIndex.from_product([[1, 2], [1, 2]]), + index=MultiIndex.from_product([[1, 2], [1, 2]]), ) result = expected.sort_index(level=0) diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py index 7a1f0a35e1486..be5f3ee9c8191 100644 --- a/pandas/tests/frame/methods/test_sort_values.py +++ b/pandas/tests/frame/methods/test_sort_values.py @@ -242,7 +242,7 @@ def test_sort_values_stable_descending_multicolumn_sort(self): def test_sort_values_stable_categorial(self): # GH#16793 - df = DataFrame({"x": pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)}) + df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)}) expected = df.copy() sorted_df = df.sort_values("x", kind="mergesort") tm.assert_frame_equal(sorted_df, expected) @@ -385,7 +385,7 @@ def test_sort_values_na_position_with_categories(self): df = DataFrame( { - column_name: pd.Categorical( + column_name: Categorical( ["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True ) } @@ -477,7 +477,7 @@ def test_sort_values_nat(self): def 
test_sort_values_na_position_with_categories_raises(self): df = DataFrame( { - "c": pd.Categorical( + "c": Categorical( ["A", np.nan, "B", np.nan, "C"], categories=["A", "B", "C"], ordered=True, @@ -703,7 +703,7 @@ def test_sort_values_key_casts_to_categorical(self, ordered): def sorter(key): if key.name == "y": return pd.Series( - pd.Categorical(key, categories=categories, ordered=ordered) + Categorical(key, categories=categories, ordered=ordered) ) return key diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index f2847315f4959..fbb51b70d34fd 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -463,7 +463,7 @@ def test_nunique(self): @pytest.mark.parametrize("tz", [None, "UTC"]) def test_mean_mixed_datetime_numeric(self, tz): # https://github.com/pandas-dev/pandas/issues/24752 - df = DataFrame({"A": [1, 1], "B": [pd.Timestamp("2000", tz=tz)] * 2}) + df = DataFrame({"A": [1, 1], "B": [Timestamp("2000", tz=tz)] * 2}) with tm.assert_produces_warning(FutureWarning): result = df.mean() expected = Series([1.0], index=["A"]) @@ -474,7 +474,7 @@ def test_mean_excludes_datetimes(self, tz): # https://github.com/pandas-dev/pandas/issues/24752 # Our long-term desired behavior is unclear, but the behavior in # 0.24.0rc1 was buggy. - df = DataFrame({"A": [pd.Timestamp("2000", tz=tz)] * 2}) + df = DataFrame({"A": [Timestamp("2000", tz=tz)] * 2}) with tm.assert_produces_warning(FutureWarning): result = df.mean() @@ -1016,8 +1016,8 @@ def test_any_datetime(self): # GH 23070 float_data = [1, np.nan, 3, np.nan] datetime_data = [ - pd.Timestamp("1960-02-15"), - pd.Timestamp("1960-02-16"), + Timestamp("1960-02-15"), + Timestamp("1960-02-16"), pd.NaT, pd.NaT, ] @@ -1148,14 +1148,14 @@ def test_series_broadcasting(self): class TestDataFrameReductions: def test_min_max_dt64_with_NaT(self): # Both NaT and Timestamp are in DataFrame. - df = DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]}) + df = DataFrame({"foo": [pd.NaT, pd.NaT, Timestamp("2012-05-01")]}) res = df.min() - exp = Series([pd.Timestamp("2012-05-01")], index=["foo"]) + exp = Series([Timestamp("2012-05-01")], index=["foo"]) tm.assert_series_equal(res, exp) res = df.max() - exp = Series([pd.Timestamp("2012-05-01")], index=["foo"]) + exp = Series([Timestamp("2012-05-01")], index=["foo"]) tm.assert_series_equal(res, exp) # GH12941, only NaTs are in DataFrame. 
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 479d7902aa111..46e34a7a58ae4 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2580,7 +2580,7 @@ def test_from_records_series_categorical_index(self): [pd.Interval(-20, -10), pd.Interval(-10, 0), pd.Interval(0, 10)] ) series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index) - frame = pd.DataFrame.from_records(series_of_dicts, index=index) + frame = DataFrame.from_records(series_of_dicts, index=index) expected = DataFrame( {"a": [1, 2, np.NaN], "b": [np.NaN, np.NaN, 3]}, index=index ) diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py index c34f5b35c1ab7..d1425c85caaee 100644 --- a/pandas/tests/frame/test_stack_unstack.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -228,7 +228,7 @@ def test_unstack_fill_frame_categorical(self): # Test unstacking with categorical data = Series(["a", "b", "c", "a"], dtype="category") - data.index = pd.MultiIndex.from_tuples( + data.index = MultiIndex.from_tuples( [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] ) @@ -261,7 +261,7 @@ def test_unstack_fill_frame_categorical(self): def test_unstack_tuplename_in_multiindex(self): # GH 19966 - idx = pd.MultiIndex.from_product( + idx = MultiIndex.from_product( [["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")] ) df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx) @@ -269,7 +269,7 @@ def test_unstack_tuplename_in_multiindex(self): expected = DataFrame( [[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]], - columns=pd.MultiIndex.from_tuples( + columns=MultiIndex.from_tuples( [ ("d", "a"), ("d", "b"), @@ -290,10 +290,10 @@ def test_unstack_tuplename_in_multiindex(self): ( ("A", "a"), [[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]], - pd.MultiIndex.from_tuples( + MultiIndex.from_tuples( [(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"] ), - pd.MultiIndex.from_tuples( + MultiIndex.from_tuples( [("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")], names=[None, ("A", "a")], ), @@ -302,7 +302,7 @@ def test_unstack_tuplename_in_multiindex(self): (("A", "a"), "B"), [[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]], Index([3, 4], name="C"), - pd.MultiIndex.from_tuples( + MultiIndex.from_tuples( [ ("d", "a", 1), ("d", "a", 2), @@ -322,7 +322,7 @@ def test_unstack_mixed_type_name_in_multiindex( self, unstack_idx, expected_values, expected_index, expected_columns ): # GH 19966 - idx = pd.MultiIndex.from_product( + idx = MultiIndex.from_product( [["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"] ) df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx) @@ -493,7 +493,7 @@ def test_unstack_bool(self): def test_unstack_level_binding(self): # GH9856 - mi = pd.MultiIndex( + mi = MultiIndex( levels=[["foo", "bar"], ["one", "two"], ["a", "b"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]], names=["first", "second", "third"], @@ -501,7 +501,7 @@ def test_unstack_level_binding(self): s = Series(0, index=mi) result = s.unstack([1, 2]).stack(0) - expected_mi = pd.MultiIndex( + expected_mi = MultiIndex( levels=[["foo", "bar"], ["one", "two"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=["first", "second"], @@ -560,7 +560,7 @@ def test_unstack_dtypes(self): result = df3.dtypes expected = Series( [np.dtype("int64")] * 4, - index=pd.MultiIndex.from_arrays( + index=MultiIndex.from_arrays( [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B") ), ) @@ -573,7 +573,7 @@ def 
test_unstack_dtypes(self): result = df3.dtypes expected = Series( [np.dtype("float64")] * 2 + [np.dtype("int64")] * 2, - index=pd.MultiIndex.from_arrays( + index=MultiIndex.from_arrays( [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B") ), ) @@ -583,7 +583,7 @@ def test_unstack_dtypes(self): result = df3.dtypes expected = Series( [np.dtype("float64")] * 2 + [np.dtype("object")] * 2, - index=pd.MultiIndex.from_arrays( + index=MultiIndex.from_arrays( [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B") ), ) @@ -628,11 +628,11 @@ def test_unstack_non_unique_index_names(self): def test_unstack_unused_levels(self): # GH 17845: unused codes in index make unstack() cast int to float - idx = pd.MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1] + idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1] df = DataFrame([[1, 0]] * 3, index=idx) result = df.unstack() - exp_col = pd.MultiIndex.from_product([[0, 1], ["A", "B", "C"]]) + exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]]) expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col) tm.assert_frame_equal(result, expected) assert (result.columns.levels[1] == idx.levels[1]).all() @@ -640,7 +640,7 @@ def test_unstack_unused_levels(self): # Unused items on both levels levels = [[0, 1, 7], [0, 1, 2, 3]] codes = [[0, 0, 1, 1], [0, 2, 0, 2]] - idx = pd.MultiIndex(levels, codes) + idx = MultiIndex(levels, codes) block = np.arange(4).reshape(2, 2) df = DataFrame(np.concatenate([block, block + 4]), index=idx) result = df.unstack() @@ -653,7 +653,7 @@ def test_unstack_unused_levels(self): # With mixed dtype and NaN levels = [["a", 2, "c"], [1, 3, 5, 7]] codes = [[0, -1, 1, 1], [0, 2, -1, 2]] - idx = pd.MultiIndex(levels, codes) + idx = MultiIndex(levels, codes) data = np.arange(8) df = DataFrame(data.reshape(4, 2), index=idx) @@ -665,7 +665,7 @@ def test_unstack_unused_levels(self): result = df.unstack(level=level) exp_data = np.zeros(18) * np.nan exp_data[idces] = data - cols = pd.MultiIndex.from_product([[0, 1], col_level]) + cols = MultiIndex.from_product([[0, 1], col_level]) expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols) tm.assert_frame_equal(result, expected) @@ -690,8 +690,8 @@ def test_unstack_long_index(self): # The error occurred only, if a lot of indices are used. 
df = DataFrame( [[1]], - columns=pd.MultiIndex.from_tuples([[0]], names=["c1"]), - index=pd.MultiIndex.from_tuples( + columns=MultiIndex.from_tuples([[0]], names=["c1"]), + index=MultiIndex.from_tuples( [[0, 0, 1, 0, 0, 0, 1]], names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"], ), @@ -699,7 +699,7 @@ def test_unstack_long_index(self): result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"]) expected = DataFrame( [[1]], - columns=pd.MultiIndex.from_tuples( + columns=MultiIndex.from_tuples( [[0, 0, 1, 0, 0, 0, 1]], names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"], ), @@ -711,10 +711,10 @@ def test_unstack_multi_level_cols(self): # PH 24729: Unstack a df with multi level columns df = DataFrame( [[0.0, 0.0], [0.0, 0.0]], - columns=pd.MultiIndex.from_tuples( + columns=MultiIndex.from_tuples( [["B", "C"], ["B", "D"]], names=["c1", "c2"] ), - index=pd.MultiIndex.from_tuples( + index=MultiIndex.from_tuples( [[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"] ), ) @@ -724,8 +724,8 @@ def test_unstack_multi_level_rows_and_cols(self): # PH 28306: Unstack df with multi level cols and rows df = DataFrame( [[1, 2], [3, 4], [-1, -2], [-3, -4]], - columns=pd.MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]), - index=pd.MultiIndex.from_tuples( + columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]), + index=MultiIndex.from_tuples( [ ["m1", "P3", 222], ["m1", "A5", 111], @@ -1039,7 +1039,7 @@ def test_stack_preserve_categorical_dtype(self, ordered, labels): # `MultiIndex.from_product` preserves categorical dtype - # it's tested elsewhere. - midx = pd.MultiIndex.from_product([df.index, cidx]) + midx = MultiIndex.from_product([df.index, cidx]) expected = Series([10, 11, 12], index=midx) tm.assert_series_equal(result, expected) @@ -1049,7 +1049,7 @@ def test_stack_preserve_categorical_dtype_values(self): cat = pd.Categorical(["a", "a", "b", "c"]) df = DataFrame({"A": cat, "B": cat}) result = df.stack() - index = pd.MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]]) + index = MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]]) expected = Series( pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index ) @@ -1058,16 +1058,16 @@ def test_stack_preserve_categorical_dtype_values(self): @pytest.mark.parametrize( "index, columns", [ - ([0, 0, 1, 1], pd.MultiIndex.from_product([[1, 2], ["a", "b"]])), - ([0, 0, 2, 3], pd.MultiIndex.from_product([[1, 2], ["a", "b"]])), - ([0, 1, 2, 3], pd.MultiIndex.from_product([[1, 2], ["a", "b"]])), + ([0, 0, 1, 1], MultiIndex.from_product([[1, 2], ["a", "b"]])), + ([0, 0, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])), + ([0, 1, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])), ], ) def test_stack_multi_columns_non_unique_index(self, index, columns): # GH-28301 df = DataFrame(index=index, columns=columns).fillna(1) stacked = df.stack() - new_index = pd.MultiIndex.from_tuples(stacked.index.to_numpy()) + new_index = MultiIndex.from_tuples(stacked.index.to_numpy()) expected = DataFrame( stacked.to_numpy(), index=new_index, columns=stacked.columns ) @@ -1078,9 +1078,7 @@ def test_stack_multi_columns_non_unique_index(self, index, columns): @pytest.mark.parametrize("level", [0, 1]) def test_unstack_mixed_extension_types(self, level): - index = pd.MultiIndex.from_tuples( - [("A", 0), ("A", 1), ("B", 1)], names=["a", "b"] - ) + index = MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 1)], names=["a", "b"]) df = DataFrame( { "A": pd.core.arrays.integer_array([0, 1, None]), @@ -1101,13 +1099,13 @@ def test_unstack_mixed_extension_types(self, 
level): @pytest.mark.parametrize("level", [0, "baz"]) def test_unstack_swaplevel_sortlevel(self, level): # GH 20994 - mi = pd.MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"]) + mi = MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"]) df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"]) df.columns.name = "foo" expected = DataFrame( [[3, 1, 2, 0]], - columns=pd.MultiIndex.from_tuples( + columns=MultiIndex.from_tuples( [("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"] ), ) @@ -1120,7 +1118,7 @@ def test_unstack_swaplevel_sortlevel(self, level): def test_unstack_fill_frame_object(): # GH12815 Test unstacking with object. data = Series(["a", "b", "c", "a"], dtype="object") - data.index = pd.MultiIndex.from_tuples( + data.index = MultiIndex.from_tuples( [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] ) @@ -1154,7 +1152,7 @@ def test_unstack_timezone_aware_values(): expected = DataFrame( [[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]], index=Index(["a"], name="a"), - columns=pd.MultiIndex( + columns=MultiIndex( levels=[["timestamp", "c"], ["b"]], codes=[[0, 1], [0, 0]], names=[None, "b"], @@ -1172,9 +1170,7 @@ def test_stack_timezone_aware_values(): result = df.stack() expected = Series( ts, - index=pd.MultiIndex( - levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]] - ), + index=MultiIndex(levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]), ) tm.assert_series_equal(result, expected) @@ -1212,12 +1208,12 @@ def test_unstacking_multi_index_df(): def test_stack_positional_level_duplicate_column_names(): # https://github.com/pandas-dev/pandas/issues/36353 - columns = pd.MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"]) + columns = MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"]) df = DataFrame([[1, 1, 1, 1]], columns=columns) result = df.stack(0) new_columns = Index(["y", "z"], name="a") - new_index = pd.MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"]) + new_index = MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"]) expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns) tm.assert_frame_equal(result, expected) @@ -1273,7 +1269,7 @@ def test_unstack_partial( result = result.iloc[1:2].unstack("ix2") expected = DataFrame( [expected_row], - columns=pd.MultiIndex.from_product( + columns=MultiIndex.from_product( [result_columns[2:], [index_product]], names=[None, "ix2"] ), index=Index([2], name="ix1"), diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index a7c22b4983ed7..151ec03662335 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -898,7 +898,7 @@ def test_apply_multi_level_name(category): def test_groupby_apply_datetime_result_dtypes(): # GH 14849 - data = pd.DataFrame.from_records( + data = DataFrame.from_records( [ (pd.Timestamp(2016, 1, 1), "red", "dark", 1, "8"), (pd.Timestamp(2015, 1, 1), "green", "stormy", 2, "9"), diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index c29c9e15ada79..8271d0c45313d 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -469,13 +469,13 @@ def test_observed_groups_with_nan(observed): def test_observed_nth(): # GH 26385 - cat = pd.Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"]) + cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"]) ser = Series([1, 2, 3]) df = 
DataFrame({"cat": cat, "ser": ser}) result = df.groupby("cat", observed=False)["ser"].nth(0) - index = pd.Categorical(["a", "b", "c"], categories=["a", "b", "c"]) + index = Categorical(["a", "b", "c"], categories=["a", "b", "c"]) expected = Series([1, np.nan, np.nan], index=index, name="ser") expected.index.name = "cat" @@ -767,7 +767,7 @@ def test_preserve_categorical_dtype(): def test_preserve_on_ordered_ops(func, values): # gh-18502 # preserve the categoricals on ops - c = pd.Categorical(["first", "second", "third", "fourth"], ordered=True) + c = Categorical(["first", "second", "third", "fourth"], ordered=True) df = DataFrame({"payload": [-1, -2, -1, -2], "col": c}) g = df.groupby("payload") result = getattr(g, func)() @@ -818,10 +818,10 @@ def test_groupby_empty_with_category(): # GH-9614 # test fix for when group by on None resulted in # coercion of dtype categorical -> float - df = DataFrame({"A": [None] * 3, "B": pd.Categorical(["train", "train", "test"])}) + df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])}) result = df.groupby("A").first()["B"] expected = Series( - pd.Categorical([], categories=["test", "train"]), + Categorical([], categories=["test", "train"]), index=Series([], dtype="object", name="A"), name="B", ) @@ -1253,7 +1253,7 @@ def test_groupby_categorical_series_dataframe_consistent(df_cat): def test_groupby_categorical_axis_1(code): # GH 13420 df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]}) - cat = pd.Categorical.from_codes(code, categories=list("abc")) + cat = Categorical.from_codes(code, categories=list("abc")) result = df.groupby(cat, axis=1).mean() expected = df.T.groupby(cat, axis=0).mean().T tm.assert_frame_equal(result, expected) @@ -1269,7 +1269,7 @@ def test_groupby_cat_preserves_structure(observed, ordered): result = ( df.groupby("Name", observed=observed) - .agg(pd.DataFrame.sum, skipna=True) + .agg(DataFrame.sum, skipna=True) .reset_index() ) @@ -1300,8 +1300,8 @@ def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, r df = DataFrame( { - "cat_1": pd.Categorical(list("AABB"), categories=list("ABCD")), - "cat_2": pd.Categorical(list("AB") * 2, categories=list("ABCD")), + "cat_1": Categorical(list("AABB"), categories=list("ABCD")), + "cat_2": Categorical(list("AB") * 2, categories=list("ABCD")), "value": [0.1] * 4, } ) @@ -1333,8 +1333,8 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans( df = DataFrame( { - "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")), - "cat_2": pd.Categorical(list("AB") * 2, categories=list("ABC")), + "cat_1": Categorical(list("AABB"), categories=list("ABC")), + "cat_2": Categorical(list("AB") * 2, categories=list("ABC")), "value": [0.1] * 4, } ) @@ -1362,15 +1362,15 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans( def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func): # GH 23865 # GH 27075 - # Ensure that df.groupby, when 'by' is two pd.Categorical variables, + # Ensure that df.groupby, when 'by' is two Categorical variables, # does not return the categories that are not in df when observed=True if reduction_func == "ngroup": pytest.skip("ngroup does not return the Categories on the index") df = DataFrame( { - "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")), - "cat_2": pd.Categorical(list("1111"), categories=list("12")), + "cat_1": Categorical(list("AABB"), categories=list("ABC")), + "cat_2": Categorical(list("1111"), categories=list("12")), "value": 
[0.1, 0.1, 0.1, 0.1], } ) @@ -1391,7 +1391,7 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_false( ): # GH 23865 # GH 27075 - # Ensure that df.groupby, when 'by' is two pd.Categorical variables, + # Ensure that df.groupby, when 'by' is two Categorical variables, # returns the categories that are not in df when observed=False/None if reduction_func == "ngroup": @@ -1399,8 +1399,8 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_false( df = DataFrame( { - "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")), - "cat_2": pd.Categorical(list("1111"), categories=list("12")), + "cat_1": Categorical(list("AABB"), categories=list("ABC")), + "cat_2": Categorical(list("1111"), categories=list("12")), "value": [0.1, 0.1, 0.1, 0.1], } ) @@ -1433,7 +1433,7 @@ def test_series_groupby_categorical_aggregation_getitem(): @pytest.mark.parametrize( "func, expected_values", - [(pd.Series.nunique, [1, 1, 2]), (pd.Series.count, [1, 2, 2])], + [(Series.nunique, [1, 1, 2]), (Series.count, [1, 2, 2])], ) def test_groupby_agg_categorical_columns(func, expected_values): # 31256 @@ -1441,7 +1441,7 @@ def test_groupby_agg_categorical_columns(func, expected_values): { "id": [0, 1, 2, 3, 4], "groups": [0, 1, 1, 2, 2], - "value": pd.Categorical([0, 0, 0, 0, 1]), + "value": Categorical([0, 0, 0, 0, 1]), } ).set_index("id") result = df.groupby("groups").agg(func) @@ -1453,10 +1453,10 @@ def test_groupby_agg_categorical_columns(func, expected_values): def test_groupby_agg_non_numeric(): - df = DataFrame({"A": pd.Categorical(["a", "a", "b"], categories=["a", "b", "c"])}) + df = DataFrame({"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"])}) expected = DataFrame({"A": [2, 1]}, index=[1, 2]) - result = df.groupby([1, 2, 1]).agg(pd.Series.nunique) + result = df.groupby([1, 2, 1]).agg(Series.nunique) tm.assert_frame_equal(result, expected) result = df.groupby([1, 2, 1]).nunique() @@ -1518,7 +1518,7 @@ def test_sorted_missing_category_values(): } ) expected = expected.rename_axis("bar", axis="index") - expected.columns = pd.CategoricalIndex( + expected.columns = CategoricalIndex( ["tiny", "small", "medium", "large"], categories=["tiny", "small", "medium", "large"], ordered=True, @@ -1637,11 +1637,11 @@ def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals( func: str, observed: bool ): # GH 34951 - cat = pd.Categorical([0, 0, 1, 1]) + cat = Categorical([0, 0, 1, 1]) val = [0, 1, 1, 0] df = DataFrame({"a": cat, "b": cat, "c": val}) - idx = pd.Categorical([0, 1]) + idx = Categorical([0, 1]) idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"]) expected_dict = { "first": Series([0, np.NaN, np.NaN, 1], idx, name="c"), @@ -1662,11 +1662,11 @@ def test_df_groupby_first_on_categorical_col_grouped_on_2_categoricals( func: str, observed: bool ): # GH 34951 - cat = pd.Categorical([0, 0, 1, 1]) + cat = Categorical([0, 0, 1, 1]) val = [0, 1, 1, 0] df = DataFrame({"a": cat, "b": cat, "c": val}) - idx = pd.Categorical([0, 1]) + idx = Categorical([0, 1]) idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"]) expected_dict = { "first": Series([0, np.NaN, np.NaN, 1], idx, name="c"), diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 05a959ea6f22b..e49e69a39b315 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -186,12 +186,12 @@ def test_arg_passthru(): "timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")], "int": [1.5, 3], "datetime": [ - pd.Timestamp("2013-01-01 
12:00:00"), - pd.Timestamp("2013-01-03 00:00:00"), + Timestamp("2013-01-01 12:00:00"), + Timestamp("2013-01-03 00:00:00"), ], "datetimetz": [ - pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"), - pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"), + Timestamp("2013-01-01 12:00:00", tz="US/Eastern"), + Timestamp("2013-01-03 00:00:00", tz="US/Eastern"), ], }, index=Index([1, 2], name="group"), @@ -916,14 +916,14 @@ def test_frame_describe_tupleindex(): def test_frame_describe_unstacked_format(): # GH 4792 prices = { - pd.Timestamp("2011-01-06 10:59:05", tz=None): 24990, - pd.Timestamp("2011-01-06 12:43:33", tz=None): 25499, - pd.Timestamp("2011-01-06 12:54:09", tz=None): 25499, + Timestamp("2011-01-06 10:59:05", tz=None): 24990, + Timestamp("2011-01-06 12:43:33", tz=None): 25499, + Timestamp("2011-01-06 12:54:09", tz=None): 25499, } volumes = { - pd.Timestamp("2011-01-06 10:59:05", tz=None): 1500000000, - pd.Timestamp("2011-01-06 12:43:33", tz=None): 5000000000, - pd.Timestamp("2011-01-06 12:54:09", tz=None): 100000000, + Timestamp("2011-01-06 10:59:05", tz=None): 1500000000, + Timestamp("2011-01-06 12:43:33", tz=None): 5000000000, + Timestamp("2011-01-06 12:54:09", tz=None): 100000000, } df = DataFrame({"PRICE": prices, "VOLUME": volumes}) result = df.groupby("PRICE").VOLUME.describe() @@ -957,7 +957,7 @@ def test_describe_with_duplicate_output_column_names(as_index): ) expected = ( - pd.DataFrame.from_records( + DataFrame.from_records( [ ("a", "count", 3.0, 3.0), ("a", "mean", 88.0, 99.0), diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index b57fa2540add9..2563eeeb68672 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -601,7 +601,7 @@ def test_as_index_select_column(): result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum()) expected = Series( - [2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)]) + [2, 6, 6], name="B", index=MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)]) ) tm.assert_series_equal(result, expected) @@ -1190,7 +1190,7 @@ def test_groupby_unit64_float_conversion(): result = df.groupby(["first", "second"])["value"].max() expected = Series( [16148277970000000000], - pd.MultiIndex.from_product([[1], [1]], names=["first", "second"]), + MultiIndex.from_product([[1], [1]], names=["first", "second"]), name="value", ) tm.assert_series_equal(result, expected) @@ -1215,7 +1215,7 @@ def test_groupby_keys_same_size_as_index(): # GH 11185 freq = "s" index = pd.date_range( - start=pd.Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq + start=Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq ) df = DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index) result = df.groupby([pd.Grouper(level=0, freq=freq), "metric"]).mean() @@ -1242,13 +1242,13 @@ def test_groupby_nat_exclude(): "values": np.random.randn(8), "dt": [ np.nan, - pd.Timestamp("2013-01-01"), + Timestamp("2013-01-01"), np.nan, - pd.Timestamp("2013-02-01"), + Timestamp("2013-02-01"), np.nan, - pd.Timestamp("2013-02-01"), + Timestamp("2013-02-01"), np.nan, - pd.Timestamp("2013-01-01"), + Timestamp("2013-01-01"), ], "str": [np.nan, "a", np.nan, "a", np.nan, "a", np.nan, "b"], } @@ -1724,7 +1724,7 @@ def test_group_shift_with_fill_value(): def test_group_shift_lose_timezone(): # GH 30134 - now_dt = pd.Timestamp.utcnow() + now_dt = Timestamp.utcnow() df = DataFrame({"a": [1, 1], "date": now_dt}) result = df.groupby("a").shift(0).iloc[0] expected = 
Series({"date": now_dt}, name=result.name) @@ -1781,9 +1781,7 @@ def test_tuple_as_grouping(): def test_tuple_correct_keyerror(): # https://github.com/pandas-dev/pandas/issues/18798 - df = DataFrame( - 1, index=range(3), columns=pd.MultiIndex.from_product([[1, 2], [3, 4]]) - ) + df = DataFrame(1, index=range(3), columns=MultiIndex.from_product([[1, 2], [3, 4]])) with pytest.raises(KeyError, match=r"^\(7, 8\)$"): df.groupby((7, 8)).mean() @@ -1798,7 +1796,7 @@ def test_groupby_agg_ohlc_non_first(): expected = DataFrame( [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]], - columns=pd.MultiIndex.from_tuples( + columns=MultiIndex.from_tuples( ( ("foo", "sum", "foo"), ("foo", "ohlc", "open"), @@ -1823,7 +1821,7 @@ def test_groupby_multiindex_nat(): (datetime(2012, 1, 2), "b"), (datetime(2012, 1, 3), "a"), ] - mi = pd.MultiIndex.from_tuples(values, names=["date", None]) + mi = MultiIndex.from_tuples(values, names=["date", None]) ser = Series([3, 2, 2.5, 4], index=mi) result = ser.groupby(level=1).mean() @@ -1844,13 +1842,13 @@ def test_groupby_multiindex_series_keys_len_equal_group_axis(): # GH 25704 index_array = [["x", "x"], ["a", "b"], ["k", "k"]] index_names = ["first", "second", "third"] - ri = pd.MultiIndex.from_arrays(index_array, names=index_names) + ri = MultiIndex.from_arrays(index_array, names=index_names) s = Series(data=[1, 2], index=ri) result = s.groupby(["first", "third"]).sum() index_array = [["x"], ["k"]] index_names = ["first", "third"] - ei = pd.MultiIndex.from_arrays(index_array, names=index_names) + ei = MultiIndex.from_arrays(index_array, names=index_names) expected = Series([3], index=ei) tm.assert_series_equal(result, expected) @@ -1859,7 +1857,7 @@ def test_groupby_multiindex_series_keys_len_equal_group_axis(): def test_groupby_groups_in_BaseGrouper(): # GH 26326 # Test if DataFrame grouped with a pandas.Grouper has correct groups - mi = pd.MultiIndex.from_product([["A", "B"], ["C", "D"]], names=["alpha", "beta"]) + mi = MultiIndex.from_product([["A", "B"], ["C", "D"]], names=["alpha", "beta"]) df = DataFrame({"foo": [1, 2, 1, 2], "bar": [1, 2, 3, 4]}, index=mi) result = df.groupby([pd.Grouper(level="alpha"), "beta"]) expected = df.groupby(["alpha", "beta"]) @@ -1885,7 +1883,7 @@ def test_groupby_axis_1(group_name): # test on MI column iterables = [["bar", "baz", "foo"], ["one", "two"]] - mi = pd.MultiIndex.from_product(iterables=iterables, names=["x", "x1"]) + mi = MultiIndex.from_product(iterables=iterables, names=["x", "x1"]) df = DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi) results = df.groupby(group_name, axis=1).sum() expected = df.T.groupby(group_name).sum().T @@ -1990,7 +1988,7 @@ def test_bool_aggs_dup_column_labels(bool_agg_func): @pytest.mark.parametrize( - "idx", [Index(["a", "a"]), pd.MultiIndex.from_tuples((("a", "a"), ("a", "a")))] + "idx", [Index(["a", "a"]), MultiIndex.from_tuples((("a", "a"), ("a", "a")))] ) @pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning") def test_dup_labels_output_shape(groupby_func, idx): @@ -2006,7 +2004,7 @@ def test_dup_labels_output_shape(groupby_func, idx): elif groupby_func == "corrwith": args.append(df) elif groupby_func == "tshift": - df.index = [pd.Timestamp("today")] + df.index = [Timestamp("today")] args.extend([1, "D"]) result = getattr(grp_by, groupby_func)(*args) diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 6c74e1521eeeb..1cacc5bb5ca6e 100644 --- a/pandas/tests/groupby/test_grouping.py +++ 
b/pandas/tests/groupby/test_grouping.py @@ -157,7 +157,7 @@ def test_grouper_multilevel_freq(self): d0 = date.today() - timedelta(days=14) dates = date_range(d0, date.today()) - date_index = pd.MultiIndex.from_product([dates, dates], names=["foo", "bar"]) + date_index = MultiIndex.from_product([dates, dates], names=["foo", "bar"]) df = DataFrame(np.random.randint(0, 100, 225), index=date_index) # Check string level @@ -233,7 +233,7 @@ def test_grouper_creation_bug(self): # GH8866 s = Series( np.arange(8, dtype="int64"), - index=pd.MultiIndex.from_product( + index=MultiIndex.from_product( [list("ab"), range(2), date_range("20130101", periods=2)], names=["one", "two", "three"], ), @@ -254,7 +254,7 @@ def test_grouper_column_and_index(self): # Grouping a multi-index frame by a column and an index level should # be equivalent to resetting the index and grouping by two columns - idx = pd.MultiIndex.from_tuples( + idx = MultiIndex.from_tuples( [("a", 1), ("a", 2), ("a", 3), ("b", 1), ("b", 2), ("b", 3)] ) idx.names = ["outer", "inner"] @@ -286,9 +286,7 @@ def test_grouper_column_and_index(self): def test_groupby_levels_and_columns(self): # GH9344, GH9049 idx_names = ["x", "y"] - idx = pd.MultiIndex.from_tuples( - [(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names - ) + idx = MultiIndex.from_tuples([(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names) df = DataFrame(np.arange(12).reshape(-1, 3), index=idx) by_levels = df.groupby(level=idx_names).mean() @@ -330,7 +328,7 @@ def test_grouper_getting_correct_binner(self): # and specifying levels df = DataFrame( {"A": 1}, - index=pd.MultiIndex.from_product( + index=MultiIndex.from_product( [list("ab"), date_range("20130101", periods=80)], names=["one", "two"] ), ) @@ -408,7 +406,7 @@ def test_multiindex_passthru(self): # GH 7997 # regression from 0.14.1 df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) - df.columns = pd.MultiIndex.from_tuples([(0, 1), (1, 1), (2, 1)]) + df.columns = MultiIndex.from_tuples([(0, 1), (1, 1), (2, 1)]) result = df.groupby(axis=1, level=[0, 1]).first() tm.assert_frame_equal(result, df) @@ -465,7 +463,7 @@ def test_groupby_multiindex_tuple(self): # GH 17979 df = DataFrame( [[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]], - columns=pd.MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]), + columns=MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]), ) expected = df.groupby([("b", 1)]).groups result = df.groupby(("b", 1)).groups @@ -473,7 +471,7 @@ def test_groupby_multiindex_tuple(self): df2 = DataFrame( df.values, - columns=pd.MultiIndex.from_arrays( + columns=MultiIndex.from_arrays( [["a", "b", "b", "c"], ["d", "d", "e", "e"]] ), ) diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py index e82b23054f1cb..df1d7819a1894 100644 --- a/pandas/tests/groupby/test_nth.py +++ b/pandas/tests/groupby/test_nth.py @@ -94,7 +94,7 @@ def test_nth_with_na_object(index, nulls_fixture): def test_first_last_with_None(method): # https://github.com/pandas-dev/pandas/issues/32800 # None should be preserved as object dtype - df = pd.DataFrame.from_dict({"id": ["a"], "value": [None]}) + df = DataFrame.from_dict({"id": ["a"], "value": [None]}) groups = df.groupby("id", as_index=False) result = getattr(groups, method)() @@ -152,8 +152,8 @@ def test_first_strings_timestamps(): # GH 11244 test = DataFrame( { - pd.Timestamp("2012-01-01 00:00:00"): ["a", "b"], - pd.Timestamp("2012-01-02 00:00:00"): ["c", "d"], + Timestamp("2012-01-01 00:00:00"): ["a", "b"], + Timestamp("2012-01-02 00:00:00"): ["c", "d"], 
"name": ["e", "e"], "aaaa": ["f", "g"], } diff --git a/pandas/tests/groupby/test_nunique.py b/pandas/tests/groupby/test_nunique.py index 7edb358170b50..22970eff28f19 100644 --- a/pandas/tests/groupby/test_nunique.py +++ b/pandas/tests/groupby/test_nunique.py @@ -121,7 +121,7 @@ def test_nunique_with_timegrouper(): } ).set_index("time") result = test.groupby(pd.Grouper(freq="h"))["data"].nunique() - expected = test.groupby(pd.Grouper(freq="h"))["data"].apply(pd.Series.nunique) + expected = test.groupby(pd.Grouper(freq="h"))["data"].apply(Series.nunique) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index 0a1232d3f24da..612079447576f 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -363,7 +363,7 @@ def test_timegrouper_get_group(self): for df in [df_original, df_reordered]: grouped = df.groupby(pd.Grouper(freq="M", key="Date")) for t, expected in zip(dt_list, expected_list): - dt = pd.Timestamp(t) + dt = Timestamp(t) result = grouped.get_group(dt) tm.assert_frame_equal(result, expected) @@ -378,7 +378,7 @@ def test_timegrouper_get_group(self): for df in [df_original, df_reordered]: grouped = df.groupby(["Buyer", pd.Grouper(freq="M", key="Date")]) for (b, t), expected in zip(g_list, expected_list): - dt = pd.Timestamp(t) + dt = Timestamp(t) result = grouped.get_group((b, dt)) tm.assert_frame_equal(result, expected) @@ -395,7 +395,7 @@ def test_timegrouper_get_group(self): for df in [df_original, df_reordered]: grouped = df.groupby(pd.Grouper(freq="M")) for t, expected in zip(dt_list, expected_list): - dt = pd.Timestamp(t) + dt = Timestamp(t) result = grouped.get_group(dt) tm.assert_frame_equal(result, expected) @@ -452,7 +452,7 @@ def test_groupby_groups_datetimeindex(self): result = df.groupby(level="date").groups dates = ["2015-01-05", "2015-01-04", "2015-01-03", "2015-01-02", "2015-01-01"] expected = { - pd.Timestamp(date): pd.DatetimeIndex([date], name="date") for date in dates + Timestamp(date): pd.DatetimeIndex([date], name="date") for date in dates } tm.assert_dict_equal(result, expected) @@ -662,9 +662,9 @@ def test_groupby_datetime64_32_bit(self): # GH 6410 / numpy 4328 # 32-bit under 1.9-dev indexing issue - df = DataFrame({"A": range(2), "B": [pd.Timestamp("2000-01-1")] * 2}) + df = DataFrame({"A": range(2), "B": [Timestamp("2000-01-1")] * 2}) result = df.groupby("A")["B"].transform(min) - expected = Series([pd.Timestamp("2000-01-1")] * 2, name="B") + expected = Series([Timestamp("2000-01-1")] * 2, name="B") tm.assert_series_equal(result, expected) def test_groupby_with_timezone_selection(self): diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index cd3c2771db8a4..1aeff7426c33a 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -106,10 +106,10 @@ def test_transform_fast(): result = df.groupby("grouping").transform("first") dates = [ - pd.Timestamp("2014-1-1"), - pd.Timestamp("2014-1-2"), - pd.Timestamp("2014-1-2"), - pd.Timestamp("2014-1-4"), + Timestamp("2014-1-1"), + Timestamp("2014-1-2"), + Timestamp("2014-1-2"), + Timestamp("2014-1-4"), ] expected = DataFrame( {"f": [1.1, 2.1, 2.1, 4.5], "d": dates, "i": [1, 2, 2, 4]}, @@ -393,9 +393,9 @@ def test_series_fast_transform_date(): result = df.groupby("grouping")["d"].transform("first") dates = [ pd.NaT, - pd.Timestamp("2014-1-2"), - 
pd.Timestamp("2014-1-2"), - pd.Timestamp("2014-1-4"), + Timestamp("2014-1-2"), + Timestamp("2014-1-2"), + Timestamp("2014-1-4"), ] expected = Series(dates, name="d") tm.assert_series_equal(result, expected) diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py index 2bfe9bf0194ec..cf2430d041d88 100644 --- a/pandas/tests/indexes/categorical/test_category.py +++ b/pandas/tests/indexes/categorical/test_category.py @@ -42,7 +42,7 @@ def test_can_hold_identifiers(self): def test_disallow_addsub_ops(self, func, op_name): # GH 10039 # set ops (+/-) raise TypeError - idx = Index(pd.Categorical(["a", "b"])) + idx = Index(Categorical(["a", "b"])) cat_or_list = "'(Categorical|list)' and '(Categorical|list)'" msg = "|".join( [ @@ -182,7 +182,7 @@ def test_insert(self): tm.assert_index_equal(result, expected) def test_insert_na_mismatched_dtype(self): - ci = pd.CategoricalIndex([0, 1, 1]) + ci = CategoricalIndex([0, 1, 1]) msg = "'fill_value=NaT' is not present in this Categorical's categories" with pytest.raises(ValueError, match=msg): ci.insert(0, pd.NaT) @@ -437,15 +437,15 @@ def test_equals_categorical(self): def test_equals_categorical_unordered(self): # https://github.com/pandas-dev/pandas/issues/16603 - a = pd.CategoricalIndex(["A"], categories=["A", "B"]) - b = pd.CategoricalIndex(["A"], categories=["B", "A"]) - c = pd.CategoricalIndex(["C"], categories=["B", "A"]) + a = CategoricalIndex(["A"], categories=["A", "B"]) + b = CategoricalIndex(["A"], categories=["B", "A"]) + c = CategoricalIndex(["C"], categories=["B", "A"]) assert a.equals(b) assert not a.equals(c) assert not b.equals(c) def test_frame_repr(self): - df = pd.DataFrame({"A": [1, 2, 3]}, index=pd.CategoricalIndex(["a", "b", "c"])) + df = pd.DataFrame({"A": [1, 2, 3]}, index=CategoricalIndex(["a", "b", "c"])) result = repr(df) expected = " A\na 1\nb 2\nc 3" assert result == expected @@ -464,11 +464,11 @@ def test_engine_type(self, dtype, engine_type): # num. of uniques required to push CategoricalIndex.codes to a # dtype (128 categories required for .codes dtype to be int16 etc.) 
num_uniques = {np.int8: 1, np.int16: 128, np.int32: 32768}[dtype] - ci = pd.CategoricalIndex(range(num_uniques)) + ci = CategoricalIndex(range(num_uniques)) else: # having 2**32 - 2**31 categories would be very memory-intensive, # so we cheat a bit with the dtype - ci = pd.CategoricalIndex(range(32768)) # == 2**16 - 2**(16 - 1) + ci = CategoricalIndex(range(32768)) # == 2**16 - 2**(16 - 1) ci.values._codes = ci.values._codes.astype("int64") assert np.issubdtype(ci.codes.dtype, dtype) assert isinstance(ci._engine, engine_type) diff --git a/pandas/tests/indexes/categorical/test_indexing.py b/pandas/tests/indexes/categorical/test_indexing.py index c720547aab3f8..3aa8710c6a6c8 100644 --- a/pandas/tests/indexes/categorical/test_indexing.py +++ b/pandas/tests/indexes/categorical/test_indexing.py @@ -355,7 +355,7 @@ def test_contains_na_dtype(self, unwrap): (1.5, True), (pd.Interval(0.5, 1.5), False), ("a", False), - (pd.Timestamp(1), False), + (Timestamp(1), False), (pd.Timedelta(1), False), ], ids=str, diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py index 13b658b31e3ee..ad9a2f112caac 100644 --- a/pandas/tests/indexes/datetimes/test_astype.py +++ b/pandas/tests/indexes/datetimes/test_astype.py @@ -268,9 +268,9 @@ def _check_rng(rng): ) def test_integer_index_astype_datetime(self, tz, dtype): # GH 20997, 20964, 24559 - val = [pd.Timestamp("2018-01-01", tz=tz).value] + val = [Timestamp("2018-01-01", tz=tz).value] result = Index(val, name="idx").astype(dtype) - expected = pd.DatetimeIndex(["2018-01-01"], tz=tz, name="idx") + expected = DatetimeIndex(["2018-01-01"], tz=tz, name="idx") tm.assert_index_equal(result, expected) def test_dti_astype_period(self): @@ -291,7 +291,7 @@ def test_astype_category(self, tz): obj = pd.date_range("2000", periods=2, tz=tz, name="idx") result = obj.astype("category") expected = pd.CategoricalIndex( - [pd.Timestamp("2000-01-01", tz=tz), pd.Timestamp("2000-01-02", tz=tz)], + [Timestamp("2000-01-01", tz=tz), Timestamp("2000-01-02", tz=tz)], name="idx", ) tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py index 0c562e7b8f848..48f87664d5141 100644 --- a/pandas/tests/indexes/datetimes/test_constructors.py +++ b/pandas/tests/indexes/datetimes/test_constructors.py @@ -27,9 +27,9 @@ def test_freq_validation_with_nat(self, dt_cls): "to passed frequency D" ) with pytest.raises(ValueError, match=msg): - dt_cls([pd.NaT, pd.Timestamp("2011-01-01")], freq="D") + dt_cls([pd.NaT, Timestamp("2011-01-01")], freq="D") with pytest.raises(ValueError, match=msg): - dt_cls([pd.NaT, pd.Timestamp("2011-01-01").value], freq="D") + dt_cls([pd.NaT, Timestamp("2011-01-01").value], freq="D") # TODO: better place for tests shared by DTI/TDI? 
@pytest.mark.parametrize( @@ -55,7 +55,7 @@ def test_categorical_preserves_tz(self): # TODO: parametrize over DatetimeIndex/DatetimeArray # once CategoricalIndex(DTA) works - dti = pd.DatetimeIndex( + dti = DatetimeIndex( [pd.NaT, "2015-01-01", "1999-04-06 15:14:13", "2015-01-01"], tz="US/Eastern" ) @@ -64,7 +64,7 @@ def test_categorical_preserves_tz(self): cser = pd.Series(ci) for obj in [ci, carr, cser]: - result = pd.DatetimeIndex(obj) + result = DatetimeIndex(obj) tm.assert_index_equal(result, dti) def test_dti_with_period_data_raises(self): @@ -106,9 +106,9 @@ def test_construction_caching(self): "dt": pd.date_range("20130101", periods=3), "dttz": pd.date_range("20130101", periods=3, tz="US/Eastern"), "dt_with_null": [ - pd.Timestamp("20130101"), + Timestamp("20130101"), pd.NaT, - pd.Timestamp("20130103"), + Timestamp("20130103"), ], "dtns": pd.date_range("20130101", periods=3, freq="ns"), } @@ -463,13 +463,13 @@ def test_construction_dti_with_mixed_timezones(self): ) def test_construction_base_constructor(self): - arr = [pd.Timestamp("2011-01-01"), pd.NaT, pd.Timestamp("2011-01-03")] - tm.assert_index_equal(Index(arr), pd.DatetimeIndex(arr)) - tm.assert_index_equal(Index(np.array(arr)), pd.DatetimeIndex(np.array(arr))) + arr = [Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-03")] + tm.assert_index_equal(Index(arr), DatetimeIndex(arr)) + tm.assert_index_equal(Index(np.array(arr)), DatetimeIndex(np.array(arr))) - arr = [np.nan, pd.NaT, pd.Timestamp("2011-01-03")] - tm.assert_index_equal(Index(arr), pd.DatetimeIndex(arr)) - tm.assert_index_equal(Index(np.array(arr)), pd.DatetimeIndex(np.array(arr))) + arr = [np.nan, pd.NaT, Timestamp("2011-01-03")] + tm.assert_index_equal(Index(arr), DatetimeIndex(arr)) + tm.assert_index_equal(Index(np.array(arr)), DatetimeIndex(np.array(arr))) def test_construction_outofbounds(self): # GH 13663 @@ -503,13 +503,13 @@ def test_integer_values_and_tz_interpreted_as_utc(self): result = DatetimeIndex(values).tz_localize("US/Central") - expected = pd.DatetimeIndex(["2000-01-01T00:00:00"], tz="US/Central") + expected = DatetimeIndex(["2000-01-01T00:00:00"], tz="US/Central") tm.assert_index_equal(result, expected) # but UTC is *not* deprecated. 
with tm.assert_produces_warning(None): result = DatetimeIndex(values, tz="UTC") - expected = pd.DatetimeIndex(["2000-01-01T00:00:00"], tz="US/Central") + expected = DatetimeIndex(["2000-01-01T00:00:00"], tz="US/Central") def test_constructor_coverage(self): rng = date_range("1/1/2000", periods=10.5) @@ -755,15 +755,15 @@ def test_construction_int_rountrip(self, tz_naive_fixture): def test_construction_from_replaced_timestamps_with_dst(self): # GH 18785 index = pd.date_range( - pd.Timestamp(2000, 1, 1), - pd.Timestamp(2005, 1, 1), + Timestamp(2000, 1, 1), + Timestamp(2005, 1, 1), freq="MS", tz="Australia/Melbourne", ) test = pd.DataFrame({"data": range(len(index))}, index=index) test = test.resample("Y").mean() - result = pd.DatetimeIndex([x.replace(month=6, day=1) for x in test.index]) - expected = pd.DatetimeIndex( + result = DatetimeIndex([x.replace(month=6, day=1) for x in test.index]) + expected = DatetimeIndex( [ "2000-06-01 00:00:00", "2001-06-01 00:00:00", @@ -801,7 +801,7 @@ def test_constructor_with_ambiguous_keyword_arg(self): # ambiguous keyword in start timezone = "America/New_York" - start = pd.Timestamp(year=2020, month=11, day=1, hour=1).tz_localize( + start = Timestamp(year=2020, month=11, day=1, hour=1).tz_localize( timezone, ambiguous=False ) result = pd.date_range(start=start, periods=2, ambiguous=False) @@ -809,7 +809,7 @@ def test_constructor_with_ambiguous_keyword_arg(self): # ambiguous keyword in end timezone = "America/New_York" - end = pd.Timestamp(year=2020, month=11, day=2, hour=1).tz_localize( + end = Timestamp(year=2020, month=11, day=2, hour=1).tz_localize( timezone, ambiguous=False ) result = pd.date_range(end=end, periods=2, ambiguous=False) @@ -821,28 +821,28 @@ def test_constructor_with_nonexistent_keyword_arg(self): timezone = "Europe/Warsaw" # nonexistent keyword in start - start = pd.Timestamp("2015-03-29 02:30:00").tz_localize( + start = Timestamp("2015-03-29 02:30:00").tz_localize( timezone, nonexistent="shift_forward" ) result = pd.date_range(start=start, periods=2, freq="H") expected = DatetimeIndex( [ - pd.Timestamp("2015-03-29 03:00:00+02:00", tz=timezone), - pd.Timestamp("2015-03-29 04:00:00+02:00", tz=timezone), + Timestamp("2015-03-29 03:00:00+02:00", tz=timezone), + Timestamp("2015-03-29 04:00:00+02:00", tz=timezone), ] ) tm.assert_index_equal(result, expected) # nonexistent keyword in end - end = pd.Timestamp("2015-03-29 02:30:00").tz_localize( + end = Timestamp("2015-03-29 02:30:00").tz_localize( timezone, nonexistent="shift_forward" ) result = pd.date_range(end=end, periods=2, freq="H") expected = DatetimeIndex( [ - pd.Timestamp("2015-03-29 01:00:00+01:00", tz=timezone), - pd.Timestamp("2015-03-29 03:00:00+02:00", tz=timezone), + Timestamp("2015-03-29 01:00:00+01:00", tz=timezone), + Timestamp("2015-03-29 03:00:00+02:00", tz=timezone), ] ) @@ -853,7 +853,7 @@ def test_constructor_no_precision_raises(self): msg = "with no precision is not allowed" with pytest.raises(ValueError, match=msg): - pd.DatetimeIndex(["2000"], dtype="datetime64") + DatetimeIndex(["2000"], dtype="datetime64") with pytest.raises(ValueError, match=msg): Index(["2000"], dtype="datetime64") @@ -861,7 +861,7 @@ def test_constructor_no_precision_raises(self): def test_constructor_wrong_precision_raises(self): msg = "Unexpected value for 'dtype': 'datetime64\\[us\\]'" with pytest.raises(ValueError, match=msg): - pd.DatetimeIndex(["2000"], dtype="datetime64[us]") + DatetimeIndex(["2000"], dtype="datetime64[us]") def 
test_index_constructor_with_numpy_object_array_and_timestamp_tz_with_nan(self): # GH 27011 @@ -1088,7 +1088,7 @@ def test_timestamp_constructor_fold_conflict(ts_input, fold): def test_timestamp_constructor_retain_fold(tz, fold): # Test for #25057 # Check that we retain fold - ts = pd.Timestamp(year=2019, month=10, day=27, hour=1, minute=30, tz=tz, fold=fold) + ts = Timestamp(year=2019, month=10, day=27, hour=1, minute=30, tz=tz, fold=fold) result = ts.fold expected = fold assert result == expected @@ -1110,7 +1110,7 @@ def test_timestamp_constructor_infer_fold_from_value(tz, ts_input, fold_out): # Test for #25057 # Check that we infer fold correctly based on timestamps since utc # or strings - ts = pd.Timestamp(ts_input, tz=tz) + ts = Timestamp(ts_input, tz=tz) result = ts.fold expected = fold_out assert result == expected @@ -1128,7 +1128,7 @@ def test_timestamp_constructor_adjust_value_for_fold(tz, ts_input, fold, value_o # Test for #25057 # Check that we adjust value for fold correctly # based on timestamps since utc - ts = pd.Timestamp(ts_input, tz=tz, fold=fold) + ts = Timestamp(ts_input, tz=tz, fold=fold) result = ts.value expected = value_out assert result == expected diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 9d867df147096..237c82436eb84 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -161,7 +161,7 @@ def test_date_range_gen_error(self): def test_begin_year_alias(self, freq): # see gh-9313 rng = date_range("1/1/2013", "7/1/2017", freq=freq) - exp = pd.DatetimeIndex( + exp = DatetimeIndex( ["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"], freq=freq, ) @@ -171,7 +171,7 @@ def test_begin_year_alias(self, freq): def test_end_year_alias(self, freq): # see gh-9313 rng = date_range("1/1/2013", "7/1/2017", freq=freq) - exp = pd.DatetimeIndex( + exp = DatetimeIndex( ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq ) tm.assert_index_equal(rng, exp) @@ -180,7 +180,7 @@ def test_end_year_alias(self, freq): def test_business_end_year_alias(self, freq): # see gh-9313 rng = date_range("1/1/2013", "7/1/2017", freq=freq) - exp = pd.DatetimeIndex( + exp = DatetimeIndex( ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq ) tm.assert_index_equal(rng, exp) @@ -188,12 +188,12 @@ def test_business_end_year_alias(self, freq): def test_date_range_negative_freq(self): # GH 11018 rng = date_range("2011-12-31", freq="-2A", periods=3) - exp = pd.DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A") + exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A") tm.assert_index_equal(rng, exp) assert rng.freq == "-2A" rng = date_range("2011-01-31", freq="-2M", periods=3) - exp = pd.DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M") + exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M") tm.assert_index_equal(rng, exp) assert rng.freq == "-2M" @@ -778,10 +778,10 @@ def test_precision_finer_than_offset(self): @pytest.mark.parametrize( "start,end", [ - (pd.Timestamp(dt1, tz=tz1), pd.Timestamp(dt2)), - (pd.Timestamp(dt1), pd.Timestamp(dt2, tz=tz2)), - (pd.Timestamp(dt1, tz=tz1), pd.Timestamp(dt2, tz=tz2)), - (pd.Timestamp(dt1, tz=tz2), pd.Timestamp(dt2, tz=tz1)), + (Timestamp(dt1, tz=tz1), Timestamp(dt2)), + (Timestamp(dt1), Timestamp(dt2, tz=tz2)), + (Timestamp(dt1, tz=tz1), Timestamp(dt2, tz=tz2)), + (Timestamp(dt1, tz=tz2), 
Timestamp(dt2, tz=tz1)), ], ) def test_mismatching_tz_raises_err(self, start, end): @@ -859,15 +859,15 @@ def test_bdays_and_open_boundaries(self, closed): def test_bday_near_overflow(self): # GH#24252 avoid doing unnecessary addition that _would_ overflow - start = pd.Timestamp.max.floor("D").to_pydatetime() + start = Timestamp.max.floor("D").to_pydatetime() rng = pd.date_range(start, end=None, periods=1, freq="B") - expected = pd.DatetimeIndex([start], freq="B") + expected = DatetimeIndex([start], freq="B") tm.assert_index_equal(rng, expected) def test_bday_overflow_error(self): # GH#24252 check that we get OutOfBoundsDatetime and not OverflowError msg = "Out of bounds nanosecond timestamp" - start = pd.Timestamp.max.floor("D").to_pydatetime() + start = Timestamp.max.floor("D").to_pydatetime() with pytest.raises(OutOfBoundsDatetime, match=msg): pd.date_range(start, periods=2, freq="B") @@ -1004,7 +1004,7 @@ def test_date_range_with_custom_holidays(): # GH 30593 freq = pd.offsets.CustomBusinessHour(start="15:00", holidays=["2020-11-26"]) result = pd.date_range(start="2020-11-25 15:00", periods=4, freq=freq) - expected = pd.DatetimeIndex( + expected = DatetimeIndex( [ "2020-11-25 15:00:00", "2020-11-25 16:00:00", diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 8e2ac4feb7ded..b801f750718ac 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -152,7 +152,7 @@ def test_iteration_preserves_tz(self): assert result == expected # 9100 - index = pd.DatetimeIndex( + index = DatetimeIndex( ["2014-12-01 03:32:39.987000-08:00", "2014-12-01 04:12:34.987000-08:00"] ) for i, ts in enumerate(index): @@ -254,7 +254,7 @@ def test_ns_index(self): dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, "ns") freq = ns * offsets.Nano() - index = pd.DatetimeIndex(dt, freq=freq, name="time") + index = DatetimeIndex(dt, freq=freq, name="time") self.assert_index_parameters(index) new_index = pd.date_range(start=index[0], end=index[-1], freq=index.freq) @@ -284,7 +284,7 @@ def test_factorize(self): tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, exp_idx) - idx2 = pd.DatetimeIndex( + idx2 = DatetimeIndex( ["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"] ) @@ -340,10 +340,10 @@ def test_factorize_dst(self): @pytest.mark.parametrize( "arr, expected", [ - (pd.DatetimeIndex(["2017", "2017"]), pd.DatetimeIndex(["2017"])), + (DatetimeIndex(["2017", "2017"]), DatetimeIndex(["2017"])), ( - pd.DatetimeIndex(["2017", "2017"], tz="US/Eastern"), - pd.DatetimeIndex(["2017"], tz="US/Eastern"), + DatetimeIndex(["2017", "2017"], tz="US/Eastern"), + DatetimeIndex(["2017"], tz="US/Eastern"), ), ], ) diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index e0506df5cd939..4e46eb126894b 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -27,8 +27,8 @@ def test_ellipsis(self): def test_getitem_slice_keeps_name(self): # GH4226 - st = pd.Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles") - et = pd.Timestamp("2013-07-02 00:00:00", tz="America/Los_Angeles") + st = Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles") + et = Timestamp("2013-07-02 00:00:00", tz="America/Los_Angeles") dr = pd.date_range(st, et, freq="H", name="timebucket") assert dr[1:].name == dr.name @@ -321,23 +321,19 @@ def test_take2(self, tz): def 
test_take_fill_value(self): # GH#12631 - idx = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx") + idx = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx") result = idx.take(np.array([1, 0, -1])) - expected = pd.DatetimeIndex( - ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx" - ) + expected = DatetimeIndex(["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx") tm.assert_index_equal(result, expected) # fill_value result = idx.take(np.array([1, 0, -1]), fill_value=True) - expected = pd.DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx") + expected = DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx") tm.assert_index_equal(result, expected) # allow_fill=False result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) - expected = pd.DatetimeIndex( - ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx" - ) + expected = DatetimeIndex(["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx") tm.assert_index_equal(result, expected) msg = ( @@ -354,25 +350,25 @@ def test_take_fill_value(self): idx.take(np.array([1, -5])) def test_take_fill_value_with_timezone(self): - idx = pd.DatetimeIndex( + idx = DatetimeIndex( ["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", tz="US/Eastern" ) result = idx.take(np.array([1, 0, -1])) - expected = pd.DatetimeIndex( + expected = DatetimeIndex( ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", tz="US/Eastern" ) tm.assert_index_equal(result, expected) # fill_value result = idx.take(np.array([1, 0, -1]), fill_value=True) - expected = pd.DatetimeIndex( + expected = DatetimeIndex( ["2011-02-01", "2011-01-01", "NaT"], name="xxx", tz="US/Eastern" ) tm.assert_index_equal(result, expected) # allow_fill=False result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) - expected = pd.DatetimeIndex( + expected = DatetimeIndex( ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", tz="US/Eastern" ) tm.assert_index_equal(result, expected) @@ -475,7 +471,7 @@ def test_get_loc_time_nat(self): # GH#35114 # Case where key's total microseconds happens to match iNaT % 1e6 // 1000 tic = time(minute=12, second=43, microsecond=145224) - dti = pd.DatetimeIndex([pd.NaT]) + dti = DatetimeIndex([pd.NaT]) loc = dti.get_loc(tic) expected = np.array([], dtype=np.intp) @@ -484,11 +480,11 @@ def test_get_loc_tz_aware(self): # https://github.com/pandas-dev/pandas/issues/32140 dti = pd.date_range( - pd.Timestamp("2019-12-12 00:00:00", tz="US/Eastern"), - pd.Timestamp("2019-12-13 00:00:00", tz="US/Eastern"), + Timestamp("2019-12-12 00:00:00", tz="US/Eastern"), + Timestamp("2019-12-13 00:00:00", tz="US/Eastern"), freq="5s", ) - key = pd.Timestamp("2019-12-12 10:19:25", tz="US/Eastern") + key = Timestamp("2019-12-12 10:19:25", tz="US/Eastern") result = dti.get_loc(key, method="nearest") assert result == 7433 @@ -589,15 +585,13 @@ def test_get_indexer(self): @pytest.mark.parametrize( "target", [ - [date(2020, 1, 1), pd.Timestamp("2020-01-02")], - [pd.Timestamp("2020-01-01"), date(2020, 1, 2)], + [date(2020, 1, 1), Timestamp("2020-01-02")], + [Timestamp("2020-01-01"), date(2020, 1, 2)], ], ) def test_get_indexer_mixed_dtypes(self, target): # https://github.com/pandas-dev/pandas/issues/33741 - values = pd.DatetimeIndex( - [pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")] - ) + values = DatetimeIndex([Timestamp("2020-01-01"), Timestamp("2020-01-02")]) result = values.get_indexer(target) expected = np.array([0, 1], dtype=np.intp) 
tm.assert_numpy_array_equal(result, expected) @@ -605,15 +599,13 @@ def test_get_indexer_mixed_dtypes(self, target): @pytest.mark.parametrize( "target, positions", [ - ([date(9999, 1, 1), pd.Timestamp("2020-01-01")], [-1, 0]), - ([pd.Timestamp("2020-01-01"), date(9999, 1, 1)], [0, -1]), + ([date(9999, 1, 1), Timestamp("2020-01-01")], [-1, 0]), + ([Timestamp("2020-01-01"), date(9999, 1, 1)], [0, -1]), ([date(9999, 1, 1), date(9999, 1, 1)], [-1, -1]), ], ) def test_get_indexer_out_of_bounds_date(self, target, positions): - values = pd.DatetimeIndex( - [pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")] - ) + values = DatetimeIndex([Timestamp("2020-01-01"), Timestamp("2020-01-02")]) result = values.get_indexer(target) expected = np.array(positions, dtype=np.intp) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 4c632aba51c45..9cf0d2035fa67 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -48,7 +48,7 @@ def test_repeat_range(self, tz_naive_fixture): assert len(result) == 5 * len(rng) index = pd.date_range("2001-01-01", periods=2, freq="D", tz=tz) - exp = pd.DatetimeIndex( + exp = DatetimeIndex( ["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz ) for res in [index.repeat(2), np.repeat(index, 2)]: @@ -56,15 +56,15 @@ def test_repeat_range(self, tz_naive_fixture): assert res.freq is None index = pd.date_range("2001-01-01", periods=2, freq="2D", tz=tz) - exp = pd.DatetimeIndex( + exp = DatetimeIndex( ["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz ) for res in [index.repeat(2), np.repeat(index, 2)]: tm.assert_index_equal(res, exp) assert res.freq is None - index = pd.DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz) - exp = pd.DatetimeIndex( + index = DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz) + exp = DatetimeIndex( [ "2001-01-01", "2001-01-01", @@ -296,23 +296,23 @@ def test_drop_duplicates(self, freq_sample, keep, expected, index): def test_infer_freq(self, freq_sample): # GH 11018 idx = pd.date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10) - result = pd.DatetimeIndex(idx.asi8, freq="infer") + result = DatetimeIndex(idx.asi8, freq="infer") tm.assert_index_equal(idx, result) assert result.freq == freq_sample def test_nat(self, tz_naive_fixture): tz = tz_naive_fixture - assert pd.DatetimeIndex._na_value is pd.NaT - assert pd.DatetimeIndex([])._na_value is pd.NaT + assert DatetimeIndex._na_value is pd.NaT + assert DatetimeIndex([])._na_value is pd.NaT - idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz) + idx = DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz) assert idx._can_hold_na tm.assert_numpy_array_equal(idx._isnan, np.array([False, False])) assert idx.hasnans is False tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp)) - idx = pd.DatetimeIndex(["2011-01-01", "NaT"], tz=tz) + idx = DatetimeIndex(["2011-01-01", "NaT"], tz=tz) assert idx._can_hold_na tm.assert_numpy_array_equal(idx._isnan, np.array([False, True])) @@ -321,7 +321,7 @@ def test_nat(self, tz_naive_fixture): def test_equals(self): # GH 13107 - idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"]) + idx = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"]) assert idx.equals(idx) assert idx.equals(idx.copy()) assert idx.equals(idx.astype(object)) @@ -330,7 +330,7 @@ def test_equals(self): assert not idx.equals(list(idx)) assert not idx.equals(Series(idx)) - idx2 = 
pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific") + idx2 = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific") assert not idx.equals(idx2) assert not idx.equals(idx2.copy()) assert not idx.equals(idx2.astype(object)) @@ -339,7 +339,7 @@ def test_equals(self): assert not idx.equals(Series(idx2)) # same internal, different tz - idx3 = pd.DatetimeIndex(idx.asi8, tz="US/Pacific") + idx3 = DatetimeIndex(idx.asi8, tz="US/Pacific") tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) assert not idx.equals(idx3) assert not idx.equals(idx3.copy()) diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index c02441b5883df..93c92c0b8f1ab 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -57,15 +57,15 @@ def test_union(self, tz, sort): rng1 = pd.date_range("1/1/2000", freq="D", periods=5, tz=tz) other1 = pd.date_range("1/6/2000", freq="D", periods=5, tz=tz) expected1 = pd.date_range("1/1/2000", freq="D", periods=10, tz=tz) - expected1_notsorted = pd.DatetimeIndex(list(other1) + list(rng1)) + expected1_notsorted = DatetimeIndex(list(other1) + list(rng1)) rng2 = pd.date_range("1/1/2000", freq="D", periods=5, tz=tz) other2 = pd.date_range("1/4/2000", freq="D", periods=5, tz=tz) expected2 = pd.date_range("1/1/2000", freq="D", periods=8, tz=tz) - expected2_notsorted = pd.DatetimeIndex(list(other2) + list(rng2[:3])) + expected2_notsorted = DatetimeIndex(list(other2) + list(rng2[:3])) rng3 = pd.date_range("1/1/2000", freq="D", periods=5, tz=tz) - other3 = pd.DatetimeIndex([], tz=tz) + other3 = DatetimeIndex([], tz=tz) expected3 = pd.date_range("1/1/2000", freq="D", periods=5, tz=tz) expected3_notsorted = rng3 @@ -307,17 +307,17 @@ def test_intersection_bug_1708(self): def test_difference(self, tz, sort): rng_dates = ["1/2/2000", "1/3/2000", "1/1/2000", "1/4/2000", "1/5/2000"] - rng1 = pd.DatetimeIndex(rng_dates, tz=tz) + rng1 = DatetimeIndex(rng_dates, tz=tz) other1 = pd.date_range("1/6/2000", freq="D", periods=5, tz=tz) - expected1 = pd.DatetimeIndex(rng_dates, tz=tz) + expected1 = DatetimeIndex(rng_dates, tz=tz) - rng2 = pd.DatetimeIndex(rng_dates, tz=tz) + rng2 = DatetimeIndex(rng_dates, tz=tz) other2 = pd.date_range("1/4/2000", freq="D", periods=5, tz=tz) - expected2 = pd.DatetimeIndex(rng_dates[:3], tz=tz) + expected2 = DatetimeIndex(rng_dates[:3], tz=tz) - rng3 = pd.DatetimeIndex(rng_dates, tz=tz) - other3 = pd.DatetimeIndex([], tz=tz) - expected3 = pd.DatetimeIndex(rng_dates, tz=tz) + rng3 = DatetimeIndex(rng_dates, tz=tz) + other3 = DatetimeIndex([], tz=tz) + expected3 = DatetimeIndex(rng_dates, tz=tz) for rng, other, expected in [ (rng1, other1, expected1), @@ -416,7 +416,7 @@ def test_union(self, sort): if sort is None: tm.assert_index_equal(right.union(left, sort=sort), the_union) else: - expected = pd.DatetimeIndex(list(right) + list(left)) + expected = DatetimeIndex(list(right) + list(left)) tm.assert_index_equal(right.union(left, sort=sort), expected) # overlapping, but different offset @@ -433,7 +433,7 @@ def test_union_not_cacheable(self, sort): if sort is None: tm.assert_index_equal(the_union, rng) else: - expected = pd.DatetimeIndex(list(rng[10:]) + list(rng[:10])) + expected = DatetimeIndex(list(rng[10:]) + list(rng[:10])) tm.assert_index_equal(the_union, expected) rng1 = rng[10:] @@ -471,7 +471,7 @@ def test_intersection_bug(self): def test_intersection_list(self): # GH#35876 values = [pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")] - 
idx = pd.DatetimeIndex(values, name="a") + idx = DatetimeIndex(values, name="a") res = idx.intersection(values) tm.assert_index_equal(res, idx.rename(None)) diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index 233835bb4b5f7..8a73f564ef064 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -872,8 +872,8 @@ def test_drop_dst_boundary(self): tz = "Europe/Brussels" freq = "15min" - start = pd.Timestamp("201710290100", tz=tz) - end = pd.Timestamp("201710290300", tz=tz) + start = Timestamp("201710290100", tz=tz) + end = Timestamp("201710290300", tz=tz) index = pd.date_range(start=start, end=end, freq=freq) expected = DatetimeIndex( @@ -1142,15 +1142,15 @@ def test_dti_union_aware(self): def test_dti_union_mixed(self): # GH 21671 - rng = DatetimeIndex([pd.Timestamp("2011-01-01"), pd.NaT]) - rng2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="Asia/Tokyo") + rng = DatetimeIndex([Timestamp("2011-01-01"), pd.NaT]) + rng2 = DatetimeIndex(["2012-01-01", "2012-01-02"], tz="Asia/Tokyo") result = rng.union(rng2) expected = Index( [ - pd.Timestamp("2011-01-01"), + Timestamp("2011-01-01"), pd.NaT, - pd.Timestamp("2012-01-01", tz="Asia/Tokyo"), - pd.Timestamp("2012-01-02", tz="Asia/Tokyo"), + Timestamp("2012-01-01", tz="Asia/Tokyo"), + Timestamp("2012-01-02", tz="Asia/Tokyo"), ], dtype=object, ) diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 17a1c69858c11..1fbbb12b64dc5 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -868,7 +868,7 @@ def test_set_closed_errors(self, bad_closed): def test_is_all_dates(self): # GH 23576 year_2017 = pd.Interval( - pd.Timestamp("2017-01-01 00:00:00"), pd.Timestamp("2018-01-01 00:00:00") + Timestamp("2017-01-01 00:00:00"), Timestamp("2018-01-01 00:00:00") ) year_2017_index = pd.IntervalIndex([year_2017]) assert not year_2017_index._is_all_dates @@ -912,7 +912,7 @@ def test_searchsorted_different_argument_classes(klass): @pytest.mark.parametrize( - "arg", [[1, 2], ["a", "b"], [pd.Timestamp("2020-01-01", tz="Europe/London")] * 2] + "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2] ) def test_searchsorted_invalid_argument(arg): values = IntervalIndex([Interval(0, 1), Interval(1, 2)]) diff --git a/pandas/tests/indexes/multi/conftest.py b/pandas/tests/indexes/multi/conftest.py index 67ebfcddf6c2d..a77af84ee1ed0 100644 --- a/pandas/tests/indexes/multi/conftest.py +++ b/pandas/tests/indexes/multi/conftest.py @@ -63,7 +63,7 @@ def narrow_multi_index(): n = 1000 ci = pd.CategoricalIndex(list("a" * n) + (["abc"] * n)) dti = pd.date_range("2000-01-01", freq="s", periods=n * 2) - return pd.MultiIndex.from_arrays([ci, ci.codes + 9, dti], names=["a", "b", "dti"]) + return MultiIndex.from_arrays([ci, ci.codes + 9, dti], names=["a", "b", "dti"]) @pytest.fixture @@ -76,4 +76,4 @@ def wide_multi_index(): dti = pd.date_range("2000-01-01", freq="s", periods=n * 2) levels = [ci, ci.codes + 9, dti, dti, dti] names = ["a", "b", "dti_1", "dti_2", "dti_3"] - return pd.MultiIndex.from_arrays(levels, names=names) + return MultiIndex.from_arrays(levels, names=names) diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py index 63afd5e130508..a2ca686d0412d 100644 --- a/pandas/tests/indexes/multi/test_constructors.py +++ 
b/pandas/tests/indexes/multi/test_constructors.py @@ -192,11 +192,11 @@ def test_from_arrays_tuples(idx): def test_from_arrays_index_series_datetimetz(): idx1 = pd.date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern") idx2 = pd.date_range("2015-01-01 10:00", freq="H", periods=3, tz="Asia/Tokyo") - result = pd.MultiIndex.from_arrays([idx1, idx2]) + result = MultiIndex.from_arrays([idx1, idx2]) tm.assert_index_equal(result.get_level_values(0), idx1) tm.assert_index_equal(result.get_level_values(1), idx2) - result2 = pd.MultiIndex.from_arrays([Series(idx1), Series(idx2)]) + result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)]) tm.assert_index_equal(result2.get_level_values(0), idx1) tm.assert_index_equal(result2.get_level_values(1), idx2) @@ -206,11 +206,11 @@ def test_from_arrays_index_series_datetimetz(): def test_from_arrays_index_series_timedelta(): idx1 = pd.timedelta_range("1 days", freq="D", periods=3) idx2 = pd.timedelta_range("2 hours", freq="H", periods=3) - result = pd.MultiIndex.from_arrays([idx1, idx2]) + result = MultiIndex.from_arrays([idx1, idx2]) tm.assert_index_equal(result.get_level_values(0), idx1) tm.assert_index_equal(result.get_level_values(1), idx2) - result2 = pd.MultiIndex.from_arrays([Series(idx1), Series(idx2)]) + result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)]) tm.assert_index_equal(result2.get_level_values(0), idx1) tm.assert_index_equal(result2.get_level_values(1), idx2) @@ -220,11 +220,11 @@ def test_from_arrays_index_series_timedelta(): def test_from_arrays_index_series_period(): idx1 = pd.period_range("2011-01-01", freq="D", periods=3) idx2 = pd.period_range("2015-01-01", freq="H", periods=3) - result = pd.MultiIndex.from_arrays([idx1, idx2]) + result = MultiIndex.from_arrays([idx1, idx2]) tm.assert_index_equal(result.get_level_values(0), idx1) tm.assert_index_equal(result.get_level_values(1), idx2) - result2 = pd.MultiIndex.from_arrays([Series(idx1), Series(idx2)]) + result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)]) tm.assert_index_equal(result2.get_level_values(0), idx1) tm.assert_index_equal(result2.get_level_values(1), idx2) @@ -237,13 +237,13 @@ def test_from_arrays_index_datetimelike_mixed(): idx3 = pd.timedelta_range("1 days", freq="D", periods=3) idx4 = pd.period_range("2011-01-01", freq="D", periods=3) - result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4]) + result = MultiIndex.from_arrays([idx1, idx2, idx3, idx4]) tm.assert_index_equal(result.get_level_values(0), idx1) tm.assert_index_equal(result.get_level_values(1), idx2) tm.assert_index_equal(result.get_level_values(2), idx3) tm.assert_index_equal(result.get_level_values(3), idx4) - result2 = pd.MultiIndex.from_arrays( + result2 = MultiIndex.from_arrays( [Series(idx1), Series(idx2), Series(idx3), Series(idx4)] ) tm.assert_index_equal(result2.get_level_values(0), idx1) @@ -259,15 +259,15 @@ def test_from_arrays_index_series_categorical(): idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=False) idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=True) - result = pd.MultiIndex.from_arrays([idx1, idx2]) + result = MultiIndex.from_arrays([idx1, idx2]) tm.assert_index_equal(result.get_level_values(0), idx1) tm.assert_index_equal(result.get_level_values(1), idx2) - result2 = pd.MultiIndex.from_arrays([Series(idx1), Series(idx2)]) + result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)]) tm.assert_index_equal(result2.get_level_values(0), idx1) 
tm.assert_index_equal(result2.get_level_values(1), idx2) - result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values]) + result3 = MultiIndex.from_arrays([idx1.values, idx2.values]) tm.assert_index_equal(result3.get_level_values(0), idx1) tm.assert_index_equal(result3.get_level_values(1), idx2) @@ -412,7 +412,7 @@ def test_from_tuples_with_tuple_label(): expected = pd.DataFrame( [[2, 1, 2], [4, (1, 2), 3]], columns=["a", "b", "c"] ).set_index(["a", "b"]) - idx = pd.MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=("a", "b")) + idx = MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=("a", "b")) result = pd.DataFrame([2, 3], columns=["c"], index=idx) tm.assert_frame_equal(expected, result) @@ -465,13 +465,13 @@ def test_from_product_invalid_input(invalid_input): def test_from_product_datetimeindex(): dt_index = date_range("2000-01-01", periods=2) - mi = pd.MultiIndex.from_product([[1, 2], dt_index]) + mi = MultiIndex.from_product([[1, 2], dt_index]) etalon = construct_1d_object_array_from_listlike( [ - (1, pd.Timestamp("2000-01-01")), - (1, pd.Timestamp("2000-01-02")), - (2, pd.Timestamp("2000-01-01")), - (2, pd.Timestamp("2000-01-02")), + (1, Timestamp("2000-01-01")), + (1, Timestamp("2000-01-02")), + (2, Timestamp("2000-01-01")), + (2, Timestamp("2000-01-02")), ] ) tm.assert_numpy_array_equal(mi.values, etalon) @@ -488,7 +488,7 @@ def test_from_product_index_series_categorical(ordered, f): list("abcaab") + list("abcaab"), categories=list("bac"), ordered=ordered ) - result = pd.MultiIndex.from_product([first, f(idx)]) + result = MultiIndex.from_product([first, f(idx)]) tm.assert_index_equal(result.get_level_values(1), expected) @@ -639,10 +639,10 @@ def test_from_frame(): df = pd.DataFrame( [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]], columns=["L1", "L2"] ) - expected = pd.MultiIndex.from_tuples( + expected = MultiIndex.from_tuples( [("a", "a"), ("a", "b"), ("b", "a"), ("b", "b")], names=["L1", "L2"] ) - result = pd.MultiIndex.from_frame(df) + result = MultiIndex.from_frame(df) tm.assert_index_equal(expected, result) @@ -660,7 +660,7 @@ def test_from_frame(): def test_from_frame_error(non_frame): # GH 22420 with pytest.raises(TypeError, match="Input must be a DataFrame"): - pd.MultiIndex.from_frame(non_frame) + MultiIndex.from_frame(non_frame) def test_from_frame_dtype_fidelity(): @@ -675,7 +675,7 @@ def test_from_frame_dtype_fidelity(): ) original_dtypes = df.dtypes.to_dict() - expected_mi = pd.MultiIndex.from_arrays( + expected_mi = MultiIndex.from_arrays( [ pd.date_range("19910905", periods=6, tz="US/Eastern"), [1, 1, 1, 2, 2, 2], @@ -684,7 +684,7 @@ def test_from_frame_dtype_fidelity(): ], names=["dates", "a", "b", "c"], ) - mi = pd.MultiIndex.from_frame(df) + mi = MultiIndex.from_frame(df) mi_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)} tm.assert_index_equal(expected_mi, mi) @@ -698,9 +698,9 @@ def test_from_frame_valid_names(names_in, names_out): # GH 22420 df = pd.DataFrame( [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]], - columns=pd.MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]), + columns=MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]), ) - mi = pd.MultiIndex.from_frame(df, names=names_in) + mi = MultiIndex.from_frame(df, names=names_in) assert mi.names == names_out @@ -715,10 +715,10 @@ def test_from_frame_invalid_names(names, expected_error_msg): # GH 22420 df = pd.DataFrame( [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]], - columns=pd.MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]), + columns=MultiIndex.from_tuples([("L1", 
"x"), ("L2", "y")]), ) with pytest.raises(ValueError, match=expected_error_msg): - pd.MultiIndex.from_frame(df, names=names) + MultiIndex.from_frame(df, names=names) def test_index_equal_empty_iterable(): diff --git a/pandas/tests/indexes/multi/test_equivalence.py b/pandas/tests/indexes/multi/test_equivalence.py index e1b011b762fe7..c31c2416ff722 100644 --- a/pandas/tests/indexes/multi/test_equivalence.py +++ b/pandas/tests/indexes/multi/test_equivalence.py @@ -202,7 +202,7 @@ def test_equals_operator(idx): def test_equals_missing_values(): # make sure take is not using -1 - i = pd.MultiIndex.from_tuples([(0, pd.NaT), (0, pd.Timestamp("20130101"))]) + i = MultiIndex.from_tuples([(0, pd.NaT), (0, pd.Timestamp("20130101"))]) result = i[0:1].equals(i[0]) assert not result result = i[1:2].equals(i[1]) @@ -255,7 +255,7 @@ def test_multiindex_compare(): # Ensure comparison operations for MultiIndex with nlevels == 1 # behave consistently with those for MultiIndex with nlevels > 1 - midx = pd.MultiIndex.from_product([[0, 1]]) + midx = MultiIndex.from_product([[0, 1]]) # Equality self-test: MultiIndex object vs self expected = Series([True, True]) diff --git a/pandas/tests/indexes/multi/test_get_level_values.py b/pandas/tests/indexes/multi/test_get_level_values.py index ec65ec2c54689..f976515870259 100644 --- a/pandas/tests/indexes/multi/test_get_level_values.py +++ b/pandas/tests/indexes/multi/test_get_level_values.py @@ -42,7 +42,7 @@ def test_get_level_values(idx): def test_get_level_values_all_na(): # GH#17924 when level entirely consists of nan arrays = [[np.nan, np.nan, np.nan], ["a", np.nan, 1]] - index = pd.MultiIndex.from_arrays(arrays) + index = MultiIndex.from_arrays(arrays) result = index.get_level_values(0) expected = Index([np.nan, np.nan, np.nan], dtype=np.float64) tm.assert_index_equal(result, expected) @@ -55,13 +55,13 @@ def test_get_level_values_all_na(): def test_get_level_values_int_with_na(): # GH#17924 arrays = [["a", "b", "b"], [1, np.nan, 2]] - index = pd.MultiIndex.from_arrays(arrays) + index = MultiIndex.from_arrays(arrays) result = index.get_level_values(1) expected = Index([1, np.nan, 2]) tm.assert_index_equal(result, expected) arrays = [["a", "b", "b"], [np.nan, np.nan, 2]] - index = pd.MultiIndex.from_arrays(arrays) + index = MultiIndex.from_arrays(arrays) result = index.get_level_values(1) expected = Index([np.nan, np.nan, 2]) tm.assert_index_equal(result, expected) @@ -69,7 +69,7 @@ def test_get_level_values_int_with_na(): def test_get_level_values_na(): arrays = [[np.nan, np.nan, np.nan], ["a", np.nan, 1]] - index = pd.MultiIndex.from_arrays(arrays) + index = MultiIndex.from_arrays(arrays) result = index.get_level_values(0) expected = Index([np.nan, np.nan, np.nan]) tm.assert_index_equal(result, expected) @@ -79,13 +79,13 @@ def test_get_level_values_na(): tm.assert_index_equal(result, expected) arrays = [["a", "b", "b"], pd.DatetimeIndex([0, 1, pd.NaT])] - index = pd.MultiIndex.from_arrays(arrays) + index = MultiIndex.from_arrays(arrays) result = index.get_level_values(1) expected = pd.DatetimeIndex([0, 1, pd.NaT]) tm.assert_index_equal(result, expected) arrays = [[], []] - index = pd.MultiIndex.from_arrays(arrays) + index = MultiIndex.from_arrays(arrays) result = index.get_level_values(0) expected = Index([], dtype=object) tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py index 6f79878fd3ab1..63dd1b575284c 100644 --- a/pandas/tests/indexes/multi/test_get_set.py +++ 
b/pandas/tests/indexes/multi/test_get_set.py @@ -228,9 +228,9 @@ def test_set_codes(idx): assert_matching(idx.codes, codes) # label changing for levels of different magnitude of categories - ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)]) + ind = MultiIndex.from_tuples([(0, i) for i in range(130)]) new_codes = range(129, -1, -1) - expected = pd.MultiIndex.from_tuples([(0, i) for i in new_codes]) + expected = MultiIndex.from_tuples([(0, i) for i in new_codes]) # [w/o mutation] result = ind.set_codes(codes=new_codes, level=1) @@ -295,8 +295,8 @@ def test_set_names_with_nlevel_1(inplace): # GH 21149 # Ensure that .set_names for MultiIndex with # nlevels == 1 does not raise any errors - expected = pd.MultiIndex(levels=[[0, 1]], codes=[[0, 1]], names=["first"]) - m = pd.MultiIndex.from_product([[0, 1]]) + expected = MultiIndex(levels=[[0, 1]], codes=[[0, 1]], names=["first"]) + m = MultiIndex.from_product([[0, 1]]) result = m.set_names("first", level=0, inplace=inplace) if inplace: @@ -326,7 +326,7 @@ def test_set_value_keeps_names(): # motivating example from #3742 lev1 = ["hans", "hans", "hans", "grethe", "grethe", "grethe"] lev2 = ["1", "2", "3"] * 2 - idx = pd.MultiIndex.from_arrays([lev1, lev2], names=["Name", "Number"]) + idx = MultiIndex.from_arrays([lev1, lev2], names=["Name", "Number"]) df = pd.DataFrame( np.random.randn(6, 4), columns=["one", "two", "three", "four"], index=idx ) @@ -342,14 +342,12 @@ def test_set_levels_with_iterable(): # GH23273 sizes = [1, 2, 3] colors = ["black"] * 3 - index = pd.MultiIndex.from_arrays([sizes, colors], names=["size", "color"]) + index = MultiIndex.from_arrays([sizes, colors], names=["size", "color"]) result = index.set_levels(map(int, ["3", "2", "1"]), level="size") expected_sizes = [3, 2, 1] - expected = pd.MultiIndex.from_arrays( - [expected_sizes, colors], names=["size", "color"] - ) + expected = MultiIndex.from_arrays([expected_sizes, colors], names=["size", "color"]) tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index 57747f8274d85..e8e31aa0cef80 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -318,8 +318,8 @@ def test_get_indexer_three_or_more_levels(self): # 4: 2 7 6 # 5: 2 7 8 # 6: 3 6 8 - mult_idx_1 = pd.MultiIndex.from_product([[1, 3], [2, 4, 6], [5, 7]]) - mult_idx_2 = pd.MultiIndex.from_tuples( + mult_idx_1 = MultiIndex.from_product([[1, 3], [2, 4, 6], [5, 7]]) + mult_idx_2 = MultiIndex.from_tuples( [ (1, 1, 8), (1, 5, 9), @@ -418,8 +418,8 @@ def test_get_indexer_crossing_levels(self): # mult_idx_2: # 0: 1 3 2 2 # 1: 2 3 2 2 - mult_idx_1 = pd.MultiIndex.from_product([[1, 2]] * 4) - mult_idx_2 = pd.MultiIndex.from_tuples([(1, 3, 2, 2), (2, 3, 2, 2)]) + mult_idx_1 = MultiIndex.from_product([[1, 2]] * 4) + mult_idx_2 = MultiIndex.from_tuples([(1, 3, 2, 2), (2, 3, 2, 2)]) # show the tuple orderings, which get_indexer() should respect assert mult_idx_1[7] < mult_idx_2[0] < mult_idx_1[8] @@ -482,7 +482,7 @@ def test_getitem_bool_index_single(ind1, ind2): idx = MultiIndex.from_tuples([(10, 1)]) tm.assert_index_equal(idx[ind1], idx) - expected = pd.MultiIndex( + expected = MultiIndex( levels=[np.array([], dtype=np.int64), np.array([], dtype=np.int64)], codes=[[], []], ) @@ -572,7 +572,7 @@ def test_get_loc_level(self): def test_get_loc_multiple_dtypes(self, dtype1, dtype2): # GH 18520 levels = [np.array([0, 1]).astype(dtype1), np.array([0, 1]).astype(dtype2)] - idx = 
pd.MultiIndex.from_product(levels) + idx = MultiIndex.from_product(levels) assert idx.get_loc(idx[2]) == 2 @pytest.mark.parametrize("level", [0, 1]) @@ -755,7 +755,7 @@ def test_large_mi_contains(self): def test_timestamp_multiindex_indexer(): # https://github.com/pandas-dev/pandas/issues/26944 - idx = pd.MultiIndex.from_product( + idx = MultiIndex.from_product( [ pd.date_range("2019-01-01T00:15:33", periods=100, freq="H", name="date"), ["x"], ] ) df = pd.DataFrame({"foo": np.arange(len(idx))}, idx) result = df.loc[pd.IndexSlice["2019-1-2":, "x", :], "foo"] - qidx = pd.MultiIndex.from_product( + qidx = MultiIndex.from_product( [ pd.date_range( start="2019-01-02T00:15:33", diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py index 6a353fe1ad6e7..f9ab0b3aceec4 100644 --- a/pandas/tests/indexes/multi/test_integrity.py +++ b/pandas/tests/indexes/multi/test_integrity.py @@ -24,7 +24,7 @@ def test_labels_dtypes(): i = MultiIndex.from_product([["a"], range(40000)]) assert i.codes[1].dtype == "int32" - i = pd.MultiIndex.from_product([["a"], range(1000)]) + i = MultiIndex.from_product([["a"], range(1000)]) assert (i.codes[0] >= 0).all() assert (i.codes[1] >= 0).all() @@ -38,7 +38,7 @@ def test_values_boxed(): (2, pd.Timestamp("2000-01-02")), (3, pd.Timestamp("2000-01-03")), ] - result = pd.MultiIndex.from_tuples(tuples) + result = MultiIndex.from_tuples(tuples) expected = construct_1d_object_array_from_listlike(tuples) tm.assert_numpy_array_equal(result.values, expected) # Check that code branches for boxed values produce identical results @@ -52,7 +52,7 @@ def test_values_multiindex_datetimeindex(): aware = pd.DatetimeIndex(ints, tz="US/Central") - idx = pd.MultiIndex.from_arrays([naive, aware]) + idx = MultiIndex.from_arrays([naive, aware]) result = idx.values outer = pd.DatetimeIndex([x[0] for x in result]) @@ -76,7 +76,7 @@ def test_values_multiindex_periodindex(): ints = np.arange(2007, 2012) pidx = pd.PeriodIndex(ints, freq="D") - idx = pd.MultiIndex.from_arrays([ints, pidx]) + idx = MultiIndex.from_arrays([ints, pidx]) result = idx.values outer = pd.Int64Index([x[0] for x in result]) @@ -139,7 +139,7 @@ def test_dims(): def take_invalid_kwargs(): vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]] - idx = pd.MultiIndex.from_product(vals, names=["str", "dt"]) + idx = MultiIndex.from_product(vals, names=["str", "dt"]) indices = [1, 2] msg = r"take\(\) got an unexpected keyword argument 'foo'" @@ -167,14 +167,14 @@ def test_isna_behavior(idx): def test_large_multiindex_error(): # GH12527 df_below_1000000 = pd.DataFrame( - 1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]), columns=["dest"] + 1, index=MultiIndex.from_product([[1, 2], range(499999)]), columns=["dest"] ) with pytest.raises(KeyError, match=r"^\(-1, 0\)$"): df_below_1000000.loc[(-1, 0), "dest"] with pytest.raises(KeyError, match=r"^\(3, 0\)$"): df_below_1000000.loc[(3, 0), "dest"] df_above_1000000 = pd.DataFrame( - 1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]), columns=["dest"] + 1, index=MultiIndex.from_product([[1, 2], range(500001)]), columns=["dest"] ) with pytest.raises(KeyError, match=r"^\(-1, 0\)$"): df_above_1000000.loc[(-1, 0), "dest"] @@ -186,7 +186,7 @@ def test_million_record_attribute_error(): # GH 18165 r = list(range(1000000)) df = pd.DataFrame( - {"a": r, "b": r}, index=pd.MultiIndex.from_tuples([(x, x) for x in r]) + {"a": r, "b": r}, 
index=MultiIndex.from_tuples([(x, x) for x in r]) ) msg = "'Series' object has no attribute 'foo'" @@ -219,7 +219,7 @@ def test_metadata_immutable(idx): def test_level_setting_resets_attributes(): - ind = pd.MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]]) + ind = MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]]) assert ind.is_monotonic with tm.assert_produces_warning(FutureWarning): ind.set_levels([["A", "B"], [1, 3, 2]], inplace=True) @@ -237,9 +237,7 @@ def test_rangeindex_fallback_coercion_bug(): str(df) expected = pd.DataFrame( {"bar": np.arange(100), "foo": np.arange(100)}, - index=pd.MultiIndex.from_product( - [range(10), range(10)], names=["fizz", "buzz"] - ), + index=MultiIndex.from_product([range(10), range(10)], names=["fizz", "buzz"]), ) tm.assert_frame_equal(df, expected, check_like=True) diff --git a/pandas/tests/indexes/multi/test_missing.py b/pandas/tests/indexes/multi/test_missing.py index 4c9d518778ceb..cd95802ac29c9 100644 --- a/pandas/tests/indexes/multi/test_missing.py +++ b/pandas/tests/indexes/multi/test_missing.py @@ -15,7 +15,7 @@ def test_fillna(idx): def test_dropna(): # GH 6194 - idx = pd.MultiIndex.from_arrays( + idx = MultiIndex.from_arrays( [ [1, np.nan, 3, np.nan, 5], [1, 2, np.nan, np.nan, 5], @@ -23,11 +23,11 @@ def test_dropna(): ] ) - exp = pd.MultiIndex.from_arrays([[1, 5], [1, 5], ["a", "e"]]) + exp = MultiIndex.from_arrays([[1, 5], [1, 5], ["a", "e"]]) tm.assert_index_equal(idx.dropna(), exp) tm.assert_index_equal(idx.dropna(how="any"), exp) - exp = pd.MultiIndex.from_arrays( + exp = MultiIndex.from_arrays( [[1, np.nan, 3, 5], [1, 2, np.nan, 5], ["a", "b", "c", "e"]] ) tm.assert_index_equal(idx.dropna(how="all"), exp) @@ -87,10 +87,8 @@ def test_hasnans_isnans(idx): def test_nan_stays_float(): # GH 7031 - idx0 = pd.MultiIndex( - levels=[["A", "B"], []], codes=[[1, 0], [-1, -1]], names=[0, 1] - ) - idx1 = pd.MultiIndex(levels=[["C"], ["D"]], codes=[[0], [0]], names=[0, 1]) + idx0 = MultiIndex(levels=[["A", "B"], []], codes=[[1, 0], [-1, -1]], names=[0, 1]) + idx1 = MultiIndex(levels=[["C"], ["D"]], codes=[[0], [0]], names=[0, 1]) idxm = idx0.join(idx1, how="outer") assert pd.isna(idx0.get_level_values(1)).all() # the following failed in 0.14.1 diff --git a/pandas/tests/indexes/multi/test_monotonic.py b/pandas/tests/indexes/multi/test_monotonic.py index 8659573d8123a..11bcd61383a7c 100644 --- a/pandas/tests/indexes/multi/test_monotonic.py +++ b/pandas/tests/indexes/multi/test_monotonic.py @@ -1,7 +1,6 @@ import numpy as np import pytest -import pandas as pd from pandas import Index, MultiIndex @@ -162,7 +161,7 @@ def test_is_monotonic_decreasing(): def test_is_strictly_monotonic_increasing(): - idx = pd.MultiIndex( + idx = MultiIndex( levels=[["bar", "baz"], ["mom", "next"]], codes=[[0, 0, 1, 1], [0, 0, 0, 1]] ) assert idx.is_monotonic_increasing is True @@ -170,7 +169,7 @@ def test_is_strictly_monotonic_increasing(): def test_is_strictly_monotonic_decreasing(): - idx = pd.MultiIndex( + idx = MultiIndex( levels=[["baz", "bar"], ["next", "mom"]], codes=[[0, 0, 1, 1], [0, 0, 0, 1]] ) assert idx.is_monotonic_decreasing is True @@ -184,5 +183,5 @@ def test_is_strictly_monotonic_decreasing(): ) def test_is_monotonic_with_nans(values, attr): # GH: 37220 - idx = pd.MultiIndex.from_tuples(values, names=["test"]) + idx = MultiIndex.from_tuples(values, names=["test"]) assert getattr(idx, attr) is False diff --git a/pandas/tests/indexes/multi/test_names.py b/pandas/tests/indexes/multi/test_names.py index 
f38da7ad2ae1c..891380b35a8be 100644 --- a/pandas/tests/indexes/multi/test_names.py +++ b/pandas/tests/indexes/multi/test_names.py @@ -127,14 +127,14 @@ def test_duplicate_level_names_access_raises(idx): def test_get_names_from_levels(): - idx = pd.MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"]) + idx = MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"]) assert idx.levels[0].name == "a" assert idx.levels[1].name == "b" def test_setting_names_from_levels_raises(): - idx = pd.MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"]) + idx = MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"]) with pytest.raises(RuntimeError, match="set_names"): idx.levels[0].name = "foo" diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py index 3d7e6e9c32248..cd063a0c3f74b 100644 --- a/pandas/tests/indexes/multi/test_sorting.py +++ b/pandas/tests/indexes/multi/test_sorting.py @@ -5,7 +5,6 @@ from pandas.errors import PerformanceWarning, UnsortedIndexError -import pandas as pd from pandas import CategoricalIndex, DataFrame, Index, MultiIndex, RangeIndex import pandas._testing as tm @@ -94,7 +93,7 @@ def test_numpy_argsort(idx): def test_unsortedindex(): # GH 11897 - mi = pd.MultiIndex.from_tuples( + mi = MultiIndex.from_tuples( [("z", "a"), ("x", "a"), ("y", "b"), ("x", "b"), ("y", "a"), ("z", "b")], names=["one", "two"], ) @@ -160,7 +159,7 @@ def test_reconstruct_sort(): assert Index(mi.values).equals(Index(recons.values)) # cannot convert to lexsorted - mi = pd.MultiIndex.from_tuples( + mi = MultiIndex.from_tuples( [("z", "a"), ("x", "a"), ("y", "b"), ("x", "b"), ("y", "a"), ("z", "b")], names=["one", "two"], ) @@ -260,9 +259,7 @@ def test_remove_unused_levels_large(first_type, second_type): ) def test_remove_unused_nan(level0, level1): # GH 18417 - mi = pd.MultiIndex( - levels=[level0, level1], codes=[[0, 2, -1, 1, -1], [0, 1, 2, 3, 2]] - ) + mi = MultiIndex(levels=[level0, level1], codes=[[0, 2, -1, 1, -1], [0, 1, 2, 3, 2]]) result = mi.remove_unused_levels() tm.assert_index_equal(result, mi) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 9820b39e20651..f37c3dff1e338 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -112,7 +112,7 @@ def test_constructor_from_index_dtlike(self, cast_as_obj, index): tm.assert_index_equal(result, index) - if isinstance(index, pd.DatetimeIndex): + if isinstance(index, DatetimeIndex): assert result.tz == index.tz if cast_as_obj: # GH#23524 check that Index(dti, dtype=object) does not @@ -222,7 +222,7 @@ def test_constructor_no_pandas_array(self): "klass,dtype,na_val", [ (pd.Float64Index, np.float64, np.nan), - (pd.DatetimeIndex, "datetime64[ns]", pd.NaT), + (DatetimeIndex, "datetime64[ns]", pd.NaT), ], ) def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val): @@ -336,7 +336,7 @@ def test_constructor_dtypes_to_timedelta(self, cast_index, vals): assert isinstance(index, TimedeltaIndex) @pytest.mark.parametrize("attr", ["values", "asi8"]) - @pytest.mark.parametrize("klass", [Index, pd.DatetimeIndex]) + @pytest.mark.parametrize("klass", [Index, DatetimeIndex]) def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, klass): # Test constructing with a datetimetz dtype # .values produces numpy datetimes, so these are considered naive @@ -349,25 +349,25 @@ def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, klass): dtype = index.dtype if attr == "asi8": - result = 
pd.DatetimeIndex(arg).tz_localize(tz_naive_fixture) + result = DatetimeIndex(arg).tz_localize(tz_naive_fixture) else: result = klass(arg, tz=tz_naive_fixture) tm.assert_index_equal(result, index) if attr == "asi8": - result = pd.DatetimeIndex(arg).astype(dtype) + result = DatetimeIndex(arg).astype(dtype) else: result = klass(arg, dtype=dtype) tm.assert_index_equal(result, index) if attr == "asi8": - result = pd.DatetimeIndex(list(arg)).tz_localize(tz_naive_fixture) + result = DatetimeIndex(list(arg)).tz_localize(tz_naive_fixture) else: result = klass(list(arg), tz=tz_naive_fixture) tm.assert_index_equal(result, index) if attr == "asi8": - result = pd.DatetimeIndex(list(arg)).astype(dtype) + result = DatetimeIndex(list(arg)).astype(dtype) else: result = klass(list(arg), dtype=dtype) tm.assert_index_equal(result, index) @@ -602,7 +602,7 @@ def test_empty_fancy(self, index, dtype): @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True) def test_empty_fancy_raises(self, index): - # pd.DatetimeIndex is excluded, because it overrides getitem and should + # DatetimeIndex is excluded, because it overrides getitem and should # be tested separately. empty_farr = np.array([], dtype=np.float_) empty_index = type(index)([]) @@ -1014,13 +1014,13 @@ def test_symmetric_difference(self, sort): @pytest.mark.parametrize("opname", ["difference", "symmetric_difference"]) def test_difference_incomparable(self, opname): - a = Index([3, pd.Timestamp("2000"), 1]) - b = Index([2, pd.Timestamp("1999"), 1]) + a = Index([3, Timestamp("2000"), 1]) + b = Index([2, Timestamp("1999"), 1]) op = operator.methodcaller(opname, b) # sort=None, the default result = op(a) - expected = Index([3, pd.Timestamp("2000"), 2, pd.Timestamp("1999")]) + expected = Index([3, Timestamp("2000"), 2, Timestamp("1999")]) if opname == "difference": expected = expected[:2] tm.assert_index_equal(result, expected) @@ -1035,8 +1035,8 @@ def test_difference_incomparable(self, opname): def test_difference_incomparable_true(self, opname): # TODO decide on True behaviour # # sort=True, raises - a = Index([3, pd.Timestamp("2000"), 1]) - b = Index([2, pd.Timestamp("1999"), 1]) + a = Index([3, Timestamp("2000"), 1]) + b = Index([2, Timestamp("1999"), 1]) op = operator.methodcaller(opname, b, sort=True) with pytest.raises(TypeError, match="Cannot compare"): @@ -2003,7 +2003,7 @@ def test_reindex_preserves_type_if_target_is_empty_list_or_array(self, labels): [ (pd.Int64Index([]), np.int64), (pd.Float64Index([]), np.float64), - (pd.DatetimeIndex([]), np.datetime64), + (DatetimeIndex([]), np.datetime64), ], ) def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self, labels, dtype): @@ -2014,7 +2014,7 @@ def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self, labels, dty def test_reindex_no_type_preserve_target_empty_mi(self): index = Index(list("abc")) result = index.reindex( - pd.MultiIndex([pd.Int64Index([]), pd.Float64Index([])], [[], []]) + MultiIndex([pd.Int64Index([]), pd.Float64Index([])], [[], []]) )[0] assert result.levels[0].dtype.type == np.int64 assert result.levels[1].dtype.type == np.float64 @@ -2346,12 +2346,12 @@ def test_dropna(self, how, dtype, vals, expected): "index,expected", [ ( - pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]), - pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]), + DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]), + DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]), ), ( - pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", 
pd.NaT]), - pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]), + DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", pd.NaT]), + DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]), ), ( pd.TimedeltaIndex(["1 days", "2 days", "3 days"]), @@ -2615,7 +2615,7 @@ def test_get_indexer_non_unique_wrong_dtype(ldtype, rdtype): def construct(dtype): if dtype is dtlike_dtypes[-1]: # PeriodArray will try to cast ints to strings - return pd.DatetimeIndex(vals).astype(dtype) + return DatetimeIndex(vals).astype(dtype) return Index(vals, dtype=dtype) left = construct(ldtype) diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py index d274aa0b06584..a908cada5b5dc 100644 --- a/pandas/tests/indexes/timedeltas/test_astype.py +++ b/pandas/tests/indexes/timedeltas/test_astype.py @@ -107,7 +107,7 @@ def test_astype_category(self): obj = pd.timedelta_range("1H", periods=2, freq="H") result = obj.astype("category") - expected = pd.CategoricalIndex([pd.Timedelta("1H"), pd.Timedelta("2H")]) + expected = pd.CategoricalIndex([Timedelta("1H"), Timedelta("2H")]) tm.assert_index_equal(result, expected) result = obj._data.astype("category") diff --git a/pandas/tests/indexes/timedeltas/test_constructors.py b/pandas/tests/indexes/timedeltas/test_constructors.py index 09344bb5054f6..1c0104f340f75 100644 --- a/pandas/tests/indexes/timedeltas/test_constructors.py +++ b/pandas/tests/indexes/timedeltas/test_constructors.py @@ -29,7 +29,7 @@ def test_infer_from_tdi(self): # has one tdi = pd.timedelta_range("1 second", periods=10 ** 7, freq="1s") - result = pd.TimedeltaIndex(tdi, freq="infer") + result = TimedeltaIndex(tdi, freq="infer") assert result.freq == tdi.freq # check that inferred_freq was not called by checking that the @@ -89,7 +89,7 @@ def test_float64_ns_rounded(self): # NaNs get converted to NaT tdi = TimedeltaIndex([2.0, np.nan]) - expected = TimedeltaIndex([pd.Timedelta(nanoseconds=2), pd.NaT]) + expected = TimedeltaIndex([Timedelta(nanoseconds=2), pd.NaT]) tm.assert_index_equal(tdi, expected) def test_float64_unit_conversion(self): @@ -99,13 +99,13 @@ def test_float64_unit_conversion(self): tm.assert_index_equal(tdi, expected) def test_construction_base_constructor(self): - arr = [pd.Timedelta("1 days"), pd.NaT, pd.Timedelta("3 days")] - tm.assert_index_equal(pd.Index(arr), pd.TimedeltaIndex(arr)) - tm.assert_index_equal(pd.Index(np.array(arr)), pd.TimedeltaIndex(np.array(arr))) + arr = [Timedelta("1 days"), pd.NaT, Timedelta("3 days")] + tm.assert_index_equal(pd.Index(arr), TimedeltaIndex(arr)) + tm.assert_index_equal(pd.Index(np.array(arr)), TimedeltaIndex(np.array(arr))) - arr = [np.nan, pd.NaT, pd.Timedelta("1 days")] - tm.assert_index_equal(pd.Index(arr), pd.TimedeltaIndex(arr)) - tm.assert_index_equal(pd.Index(np.array(arr)), pd.TimedeltaIndex(np.array(arr))) + arr = [np.nan, pd.NaT, Timedelta("1 days")] + tm.assert_index_equal(pd.Index(arr), TimedeltaIndex(arr)) + tm.assert_index_equal(pd.Index(np.array(arr)), TimedeltaIndex(np.array(arr))) def test_constructor(self): expected = TimedeltaIndex( @@ -218,7 +218,7 @@ def test_constructor_no_precision_raises(self): msg = "with no precision is not allowed" with pytest.raises(ValueError, match=msg): - pd.TimedeltaIndex(["2000"], dtype="timedelta64") + TimedeltaIndex(["2000"], dtype="timedelta64") with pytest.raises(ValueError, match=msg): pd.Index(["2000"], dtype="timedelta64") @@ -226,7 +226,7 @@ def test_constructor_no_precision_raises(self): def 
test_constructor_wrong_precision_raises(self): msg = r"dtype timedelta64\[us\] cannot be converted to timedelta64\[ns\]" with pytest.raises(ValueError, match=msg): - pd.TimedeltaIndex(["2000"], dtype="timedelta64[us]") + TimedeltaIndex(["2000"], dtype="timedelta64[us]") def test_explicit_none_freq(self): # Explicitly passing freq=None is respected diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index c4429137d17f0..b74160e7e0635 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -181,13 +181,13 @@ def test_drop_duplicates(self, freq_sample, keep, expected, index): def test_infer_freq(self, freq_sample): # GH#11018 idx = pd.timedelta_range("1", freq=freq_sample, periods=10) - result = pd.TimedeltaIndex(idx.asi8, freq="infer") + result = TimedeltaIndex(idx.asi8, freq="infer") tm.assert_index_equal(idx, result) assert result.freq == freq_sample def test_repeat(self): index = pd.timedelta_range("1 days", periods=2, freq="D") - exp = pd.TimedeltaIndex(["1 days", "1 days", "2 days", "2 days"]) + exp = TimedeltaIndex(["1 days", "1 days", "2 days", "2 days"]) for res in [index.repeat(2), np.repeat(index, 2)]: tm.assert_index_equal(res, exp) assert res.freq is None @@ -211,17 +211,17 @@ def test_repeat(self): assert res.freq is None def test_nat(self): - assert pd.TimedeltaIndex._na_value is pd.NaT - assert pd.TimedeltaIndex([])._na_value is pd.NaT + assert TimedeltaIndex._na_value is pd.NaT + assert TimedeltaIndex([])._na_value is pd.NaT - idx = pd.TimedeltaIndex(["1 days", "2 days"]) + idx = TimedeltaIndex(["1 days", "2 days"]) assert idx._can_hold_na tm.assert_numpy_array_equal(idx._isnan, np.array([False, False])) assert idx.hasnans is False tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp)) - idx = pd.TimedeltaIndex(["1 days", "NaT"]) + idx = TimedeltaIndex(["1 days", "NaT"]) assert idx._can_hold_na tm.assert_numpy_array_equal(idx._isnan, np.array([False, True])) @@ -230,7 +230,7 @@ def test_nat(self): def test_equals(self): # GH 13107 - idx = pd.TimedeltaIndex(["1 days", "2 days", "NaT"]) + idx = TimedeltaIndex(["1 days", "2 days", "NaT"]) assert idx.equals(idx) assert idx.equals(idx.copy()) assert idx.equals(idx.astype(object)) @@ -239,7 +239,7 @@ def test_equals(self): assert not idx.equals(list(idx)) assert not idx.equals(Series(idx)) - idx2 = pd.TimedeltaIndex(["2 days", "1 days", "NaT"]) + idx2 = TimedeltaIndex(["2 days", "1 days", "NaT"]) assert not idx.equals(idx2) assert not idx.equals(idx2.copy()) assert not idx.equals(idx2.astype(object)) diff --git a/pandas/tests/indexes/timedeltas/test_setops.py b/pandas/tests/indexes/timedeltas/test_setops.py index 6a2f66cade733..16ac70d9f23f2 100644 --- a/pandas/tests/indexes/timedeltas/test_setops.py +++ b/pandas/tests/indexes/timedeltas/test_setops.py @@ -35,7 +35,7 @@ def test_union_sort_false(self): tm.assert_index_equal(result, tdi) result = left.union(right, sort=False) - expected = pd.TimedeltaIndex(["4 Days", "5 Days", "1 Days", "2 Day", "3 Days"]) + expected = TimedeltaIndex(["4 Days", "5 Days", "1 Days", "2 Day", "3 Days"]) tm.assert_index_equal(result, expected) def test_union_coverage(self): @@ -228,7 +228,7 @@ def test_difference_freq(self, sort): def test_difference_sort(self, sort): - index = pd.TimedeltaIndex( + index = TimedeltaIndex( ["5 days", "3 days", "2 days", "4 days", "1 days", "0 days"] ) diff --git a/pandas/tests/indexes/timedeltas/test_shift.py 
b/pandas/tests/indexes/timedeltas/test_shift.py index 1282bd510ec17..9864f7358018e 100644 --- a/pandas/tests/indexes/timedeltas/test_shift.py +++ b/pandas/tests/indexes/timedeltas/test_shift.py @@ -14,26 +14,26 @@ class TestTimedeltaIndexShift: def test_tdi_shift_empty(self): # GH#9903 - idx = pd.TimedeltaIndex([], name="xxx") + idx = TimedeltaIndex([], name="xxx") tm.assert_index_equal(idx.shift(0, freq="H"), idx) tm.assert_index_equal(idx.shift(3, freq="H"), idx) def test_tdi_shift_hours(self): # GH#9903 - idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx") + idx = TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx") tm.assert_index_equal(idx.shift(0, freq="H"), idx) - exp = pd.TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx") + exp = TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx") tm.assert_index_equal(idx.shift(3, freq="H"), exp) - exp = pd.TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx") + exp = TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx") tm.assert_index_equal(idx.shift(-3, freq="H"), exp) def test_tdi_shift_minutes(self): # GH#9903 - idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx") + idx = TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx") tm.assert_index_equal(idx.shift(0, freq="T"), idx) - exp = pd.TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx") + exp = TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx") tm.assert_index_equal(idx.shift(3, freq="T"), exp) - exp = pd.TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx") + exp = TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx") tm.assert_index_equal(idx.shift(-3, freq="T"), exp) def test_tdi_shift_int(self): diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index 2cb5b55f14596..d79af1ea6b804 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -447,7 +447,7 @@ def test_loc_period_string_indexing(): # GH 9892 a = pd.period_range("2013Q1", "2013Q4", freq="Q") i = (1111, 2222, 3333) - idx = pd.MultiIndex.from_product((a, i), names=("Periode", "CVR")) + idx = MultiIndex.from_product((a, i), names=("Periode", "CVR")) df = DataFrame( index=idx, columns=( @@ -467,7 +467,7 @@ def test_loc_period_string_indexing(): [np.nan], dtype=object, name="OMS", - index=pd.MultiIndex.from_tuples( + index=MultiIndex.from_tuples( [(pd.Period("2013Q1"), 1111)], names=["Periode", "CVR"] ), ) @@ -477,7 +477,7 @@ def test_loc_period_string_indexing(): def test_loc_datetime_mask_slicing(): # GH 16699 dt_idx = pd.to_datetime(["2017-05-04", "2017-05-05"]) - m_idx = pd.MultiIndex.from_product([dt_idx, dt_idx], names=["Idx1", "Idx2"]) + m_idx = MultiIndex.from_product([dt_idx, dt_idx], names=["Idx1", "Idx2"]) df = DataFrame( data=[[1, 2], [3, 4], [5, 6], [7, 6]], index=m_idx, columns=["C1", "C2"] ) @@ -498,7 +498,7 @@ def test_loc_datetime_series_tuple_slicing(): date = pd.Timestamp("2000") ser = Series( 1, - index=pd.MultiIndex.from_tuples([("a", date)], names=["a", "b"]), + index=MultiIndex.from_tuples([("a", date)], names=["a", "b"]), name="c", ) result = ser.loc[:, [date]] @@ -568,7 +568,7 @@ def test_3levels_leading_period_index(): ) lev2 = ["A", "A", "Z", "W"] lev3 = ["B", "C", "Q", "F"] - mi = pd.MultiIndex.from_arrays([pi, lev2, lev3]) + mi = MultiIndex.from_arrays([pi, lev2, lev3]) ser = Series(range(4), index=mi, dtype=np.float64) result = ser.loc[(pi[0], "A", "B")] @@ -586,7 
+586,7 @@ def test_missing_keys_raises_keyerror(self): def test_missing_key_raises_keyerror2(self): # GH#21168 KeyError, not "IndexingError: Too many indexers" - ser = Series(-1, index=pd.MultiIndex.from_product([[0, 1]] * 2)) + ser = Series(-1, index=MultiIndex.from_product([[0, 1]] * 2)) with pytest.raises(KeyError, match=r"\(0, 3\)"): ser.loc[0, 3] diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py index 059f8543104a7..543416126f12c 100644 --- a/pandas/tests/indexing/multiindex/test_setitem.py +++ b/pandas/tests/indexing/multiindex/test_setitem.py @@ -427,7 +427,7 @@ def test_astype_assignment_with_dups(self): def test_setitem_nonmonotonic(self): # https://github.com/pandas-dev/pandas/issues/31449 - index = pd.MultiIndex.from_tuples( + index = MultiIndex.from_tuples( [("a", "c"), ("b", "x"), ("a", "d")], names=["l1", "l2"] ) df = DataFrame(data=[0, 1, 2], index=index, columns=["e"]) diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py index 5e00056c33db7..4879f805b5a2d 100644 --- a/pandas/tests/indexing/test_datetime.py +++ b/pandas/tests/indexing/test_datetime.py @@ -304,12 +304,8 @@ def test_loc_getitem_across_dst(self): ) series2 = Series([0, 1, 2, 3, 4], index=idx) - t_1 = pd.Timestamp( - "2017-10-29 02:30:00+02:00", tz="Europe/Berlin", freq="30min" - ) - t_2 = pd.Timestamp( - "2017-10-29 02:00:00+01:00", tz="Europe/Berlin", freq="30min" - ) + t_1 = Timestamp("2017-10-29 02:30:00+02:00", tz="Europe/Berlin", freq="30min") + t_2 = Timestamp("2017-10-29 02:00:00+01:00", tz="Europe/Berlin", freq="30min") result = series2.loc[t_1:t_2] expected = Series([2, 3], index=idx[2:4]) tm.assert_series_equal(result, expected) @@ -330,9 +326,9 @@ def test_loc_incremental_setitem_with_dst(self): def test_loc_setitem_with_existing_dst(self): # GH 18308 - start = pd.Timestamp("2017-10-29 00:00:00+0200", tz="Europe/Madrid") - end = pd.Timestamp("2017-10-29 03:00:00+0100", tz="Europe/Madrid") - ts = pd.Timestamp("2016-10-10 03:00:00", tz="Europe/Madrid") + start = Timestamp("2017-10-29 00:00:00+0200", tz="Europe/Madrid") + end = Timestamp("2017-10-29 03:00:00+0100", tz="Europe/Madrid") + ts = Timestamp("2016-10-10 03:00:00", tz="Europe/Madrid") idx = pd.date_range(start, end, closed="left", freq="H") result = DataFrame(index=idx, columns=["value"]) result.loc[ts, "value"] = 12 diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 9d1a2bed5db12..689cdbce103e6 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1072,7 +1072,7 @@ def test_datetime_block_can_hold_element(self): class TestShouldStore: def test_should_store_categorical(self): - cat = pd.Categorical(["A", "B", "C"]) + cat = Categorical(["A", "B", "C"]) df = DataFrame(cat) blk = df._mgr.blocks[0] @@ -1114,7 +1114,7 @@ def test_validate_ndim(): def test_block_shape(): idx = Index([0, 1, 2, 3, 4]) a = Series([1, 2, 3]).reindex(idx) - b = Series(pd.Categorical([1, 2, 3])).reindex(idx) + b = Series(Categorical([1, 2, 3])).reindex(idx) assert a._mgr.blocks[0].mgr_locs.indexer == b._mgr.blocks[0].mgr_locs.indexer diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index c474e67123ef7..ec0a32318ec34 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -992,7 +992,7 @@ def test_no_header_with_list_index_col(self, read_ext): # GH 31783 file_name = 
"testmultiindex" + read_ext data = [("B", "B"), ("key", "val"), (3, 4), (3, 4)] - idx = pd.MultiIndex.from_tuples( + idx = MultiIndex.from_tuples( [("A", "A"), ("key", "val"), (1, 2), (1, 2)], names=(0, 1) ) expected = DataFrame(data, index=idx, columns=(2, 3)) @@ -1168,9 +1168,7 @@ def test_excel_high_surrogate(self, engine): def test_header_with_index_col(self, engine, filename): # GH 33476 idx = Index(["Z"], name="I2") - cols = pd.MultiIndex.from_tuples( - [("A", "B"), ("A", "B.1")], names=["I11", "I12"] - ) + cols = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"]) expected = DataFrame([[1, 3]], index=idx, columns=cols, dtype="int64") result = pd.read_excel( filename, sheet_name="Sheet1", index_col=0, header=[0, 1] @@ -1185,7 +1183,7 @@ def test_read_datetime_multiindex(self, engine, read_ext): f = "test_datetime_mi" + read_ext with pd.ExcelFile(f) as excel: actual = pd.read_excel(excel, header=[0, 1], index_col=0, engine=engine) - expected_column_index = pd.MultiIndex.from_tuples( + expected_column_index = MultiIndex.from_tuples( [(pd.to_datetime("02/29/2020"), pd.to_datetime("03/01/2020"))], names=[ pd.to_datetime("02/29/2020").to_pydatetime(), diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 72b28c71b6511..b3cf7a6828808 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1009,7 +1009,7 @@ def test_datetimelike_frame(self): # GH 12211 df = DataFrame( - {"date": [pd.Timestamp("20130101").tz_localize("UTC")] + [pd.NaT] * 5} + {"date": [Timestamp("20130101").tz_localize("UTC")] + [pd.NaT] * 5} ) with option_context("display.max_rows", 5): @@ -1019,7 +1019,7 @@ def test_datetimelike_frame(self): assert "..." in result assert "[6 rows x 1 columns]" in result - dts = [pd.Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [pd.NaT] * 5 + dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [pd.NaT] * 5 df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}) with option_context("display.max_rows", 5): expected = ( @@ -1033,7 +1033,7 @@ def test_datetimelike_frame(self): ) assert repr(df) == expected - dts = [pd.NaT] * 5 + [pd.Timestamp("2011-01-01", tz="US/Eastern")] * 5 + dts = [pd.NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5 df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}) with option_context("display.max_rows", 5): expected = ( @@ -1047,8 +1047,8 @@ def test_datetimelike_frame(self): ) assert repr(df) == expected - dts = [pd.Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [ - pd.Timestamp("2011-01-01", tz="US/Eastern") + dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [ + Timestamp("2011-01-01", tz="US/Eastern") ] * 5 df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}) with option_context("display.max_rows", 5): @@ -2184,7 +2184,7 @@ def test_east_asian_unicode_series(self): # object dtype, longer than unicode repr s = Series( - [1, 22, 3333, 44444], index=[1, "AB", pd.Timestamp("2011-01-01"), "あああ"] + [1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"] ) expected = ( "1 1\n" @@ -2282,7 +2282,7 @@ def test_east_asian_unicode_series(self): # object dtype, longer than unicode repr s = Series( [1, 22, 3333, 44444], - index=[1, "AB", pd.Timestamp("2011-01-01"), "あああ"], + index=[1, "AB", Timestamp("2011-01-01"), "あああ"], ) expected = ( "1 1\n" diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index 71698a02285f9..6e35b224ef4c3 100644 --- 
a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -252,7 +252,7 @@ def test_read_json_from_to_json_results(self): } ) result1 = pd.read_json(df.to_json()) - result2 = pd.DataFrame.from_dict(json.loads(df.to_json())) + result2 = DataFrame.from_dict(json.loads(df.to_json())) tm.assert_frame_equal(result1, df) tm.assert_frame_equal(result2, df) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 92cc0f969ec87..47b7bd0983305 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -448,8 +448,8 @@ def test_v12_compat(self, datapath): columns=["A", "B", "C", "D"], index=dti, ) - df["date"] = pd.Timestamp("19920106 18:21:32.12") - df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101") + df["date"] = Timestamp("19920106 18:21:32.12") + df.iloc[3, df.columns.get_loc("date")] = Timestamp("20130101") df["modified"] = df["date"] df.iloc[1, df.columns.get_loc("modified")] = pd.NaT @@ -788,9 +788,7 @@ def test_convert_dates(self, datetime_series, datetime_frame): @pytest.mark.parametrize("date_format", ["epoch", "iso"]) @pytest.mark.parametrize("as_object", [True, False]) - @pytest.mark.parametrize( - "date_typ", [datetime.date, datetime.datetime, pd.Timestamp] - ) + @pytest.mark.parametrize("date_typ", [datetime.date, datetime.datetime, Timestamp]) def test_date_index_and_values(self, date_format, as_object, date_typ): data = [date_typ(year=2020, month=1, day=1), pd.NaT] if as_object: @@ -1023,12 +1021,10 @@ def test_timedelta(self): tm.assert_frame_equal(frame, result) def test_mixed_timedelta_datetime(self): - frame = DataFrame( - {"a": [timedelta(23), pd.Timestamp("20130101")]}, dtype=object - ) + frame = DataFrame({"a": [timedelta(23), Timestamp("20130101")]}, dtype=object) expected = DataFrame( - {"a": [pd.Timedelta(frame.a[0]).value, pd.Timestamp(frame.a[1]).value]} + {"a": [pd.Timedelta(frame.a[0]).value, Timestamp(frame.a[1]).value]} ) result = pd.read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"}) tm.assert_frame_equal(result, expected, check_index_type=False) diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 662659982c0b3..7a5203ca86520 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -641,7 +641,7 @@ def test_nat_parse(all_parsers): # see gh-3062 parser = all_parsers df = DataFrame( - dict({"A": np.arange(10, dtype="float64"), "B": pd.Timestamp("20010101")}) + dict({"A": np.arange(10, dtype="float64"), "B": Timestamp("20010101")}) ) df.iloc[3:6, :] = np.nan @@ -1472,7 +1472,7 @@ def test_parse_timezone(all_parsers): 2018-01-04 09:05:00+09:00,23400""" result = parser.read_csv(StringIO(data), parse_dates=["dt"]) - dti = pd.DatetimeIndex( + dti = DatetimeIndex( list( pd.date_range( start="2018-01-04 09:01:00", diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index 2437ba77532fa..f37b0aabd3aed 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -1918,7 +1918,7 @@ def test_select_columns_in_where(self, setup_path): def test_mi_data_columns(self, setup_path): # GH 14435 - idx = pd.MultiIndex.from_arrays( + idx = MultiIndex.from_arrays( [date_range("2000-01-01", periods=5), range(5)], names=["date", "id"] ) df = DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx) @@ -2325,7 +2325,7 @@ def test_same_name_scoping(self, setup_path): 
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20) ) store.put("df", df, format="table") - expected = df[df.index > pd.Timestamp("20130105")] + expected = df[df.index > Timestamp("20130105")] import datetime @@ -4152,7 +4152,7 @@ def test_legacy_table_fixed_format_read_datetime_py2(self, datapath, setup_path) ) as store: result = store.select("df") expected = DataFrame( - [[pd.Timestamp("2020-02-06T18:00")]], + [[Timestamp("2020-02-06T18:00")]], columns=["A"], index=Index(["date"]), ) @@ -4768,14 +4768,14 @@ def test_query_compare_column_type(self, setup_path): with ensure_clean_store(setup_path) as store: store.append("test", df, format="table", data_columns=True) - ts = pd.Timestamp("2014-01-01") # noqa + ts = Timestamp("2014-01-01") # noqa result = store.select("test", where="real_date > ts") expected = df.loc[[1], :] tm.assert_frame_equal(expected, result) for op in ["<", ">", "=="]: # non strings to string column always fail - for v in [2.1, True, pd.Timestamp("2014-01-01"), pd.Timedelta(1, "s")]: + for v in [2.1, True, Timestamp("2014-01-01"), pd.Timedelta(1, "s")]: query = f"date {op} v" with pytest.raises(TypeError): store.select("test", where=query) diff --git a/pandas/tests/io/pytables/test_timezones.py b/pandas/tests/io/pytables/test_timezones.py index e137bc2dca48e..cc418bc52cae1 100644 --- a/pandas/tests/io/pytables/test_timezones.py +++ b/pandas/tests/io/pytables/test_timezones.py @@ -212,7 +212,7 @@ def test_append_with_timezones_pytz(setup_path): def test_roundtrip_tz_aware_index(setup_path): # GH 17618 - time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern") + time = Timestamp("2000-01-01 01:00:00", tz="US/Eastern") df = DataFrame(data=[0], index=[time]) with ensure_clean_store(setup_path) as store: @@ -225,7 +225,7 @@ def test_roundtrip_tz_aware_index(setup_path): def test_store_index_name_with_tz(setup_path): # GH 13884 df = DataFrame({"A": [1, 2]}) - df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788]) + df.index = DatetimeIndex([1234567890123456787, 1234567890123456788]) df.index = df.index.tz_localize("UTC") df.index.name = "foo" @@ -402,7 +402,7 @@ def test_py2_created_with_datetimez(datapath, setup_path): # Python 3. 
# # GH26443 - index = [pd.Timestamp("2019-01-01T18:00").tz_localize("America/New_York")] + index = [Timestamp("2019-01-01T18:00").tz_localize("America/New_York")] expected = DataFrame({"data": 123}, index=index) with ensure_clean_store( datapath("io", "data", "legacy_hdf", "gh26443.h5"), mode="r" diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index d6506d434d6a7..19eb64be1be29 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -697,8 +697,8 @@ def test_date_parsing(self): ) assert issubclass(df.DateCol.dtype.type, np.datetime64) assert df.DateCol.tolist() == [ - pd.Timestamp(2000, 1, 3, 0, 0, 0), - pd.Timestamp(2000, 1, 4, 0, 0, 0), + Timestamp(2000, 1, 3, 0, 0, 0), + Timestamp(2000, 1, 4, 0, 0, 0), ] df = sql.read_sql_query( @@ -708,8 +708,8 @@ def test_date_parsing(self): ) assert issubclass(df.DateCol.dtype.type, np.datetime64) assert df.DateCol.tolist() == [ - pd.Timestamp(2000, 1, 3, 0, 0, 0), - pd.Timestamp(2000, 1, 4, 0, 0, 0), + Timestamp(2000, 1, 3, 0, 0, 0), + Timestamp(2000, 1, 4, 0, 0, 0), ] df = sql.read_sql_query( @@ -717,8 +717,8 @@ def test_date_parsing(self): ) assert issubclass(df.IntDateCol.dtype.type, np.datetime64) assert df.IntDateCol.tolist() == [ - pd.Timestamp(1986, 12, 25, 0, 0, 0), - pd.Timestamp(2013, 1, 1, 0, 0, 0), + Timestamp(1986, 12, 25, 0, 0, 0), + Timestamp(2013, 1, 1, 0, 0, 0), ] df = sql.read_sql_query( @@ -726,8 +726,8 @@ def test_date_parsing(self): ) assert issubclass(df.IntDateCol.dtype.type, np.datetime64) assert df.IntDateCol.tolist() == [ - pd.Timestamp(1986, 12, 25, 0, 0, 0), - pd.Timestamp(2013, 1, 1, 0, 0, 0), + Timestamp(1986, 12, 25, 0, 0, 0), + Timestamp(2013, 1, 1, 0, 0, 0), ] df = sql.read_sql_query( @@ -737,8 +737,8 @@ def test_date_parsing(self): ) assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64) assert df.IntDateOnlyCol.tolist() == [ - pd.Timestamp("2010-10-10"), - pd.Timestamp("2010-12-12"), + Timestamp("2010-10-10"), + Timestamp("2010-12-12"), ] def test_date_and_index(self): diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index fecffd75f9478..b065aa187f5fb 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -990,7 +990,7 @@ def test_categorical_writing(self, version): def test_categorical_warnings_and_errors(self): # Warning for non-string labels # Error for labels too long - original = pd.DataFrame.from_records( + original = DataFrame.from_records( [["a" * 10000], ["b" * 10000], ["c" * 10000], ["d" * 10000]], columns=["Too_long"], ) @@ -1006,7 +1006,7 @@ def test_categorical_warnings_and_errors(self): with pytest.raises(ValueError, match=msg): original.to_stata(path) - original = pd.DataFrame.from_records( + original = DataFrame.from_records( [["a"], ["b"], ["c"], ["d"], [1]], columns=["Too_long"] ) original = pd.concat( @@ -1021,7 +1021,7 @@ def test_categorical_warnings_and_errors(self): def test_categorical_with_stata_missing_values(self, version): values = [["a" + str(i)] for i in range(120)] values.append([np.nan]) - original = pd.DataFrame.from_records(values, columns=["many_labels"]) + original = DataFrame.from_records(values, columns=["many_labels"]) original = pd.concat( [original[col].astype("category") for col in original], axis=1 ) diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index cc86436ee8fa9..1e84ba1dbffd9 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -184,7 +184,7 @@ def 
test_same_tz_min_max_axis_1(self, op, expected_col): df = DataFrame( pd.date_range("2016-01-01 00:00:00", periods=3, tz="UTC"), columns=["a"] ) - df["b"] = df.a.subtract(pd.Timedelta(seconds=3600)) + df["b"] = df.a.subtract(Timedelta(seconds=3600)) result = getattr(df, op)(axis=1) expected = df[expected_col].rename(None) tm.assert_series_equal(result, expected) @@ -364,11 +364,11 @@ def test_invalid_td64_reductions(self, opname): def test_minmax_tz(self, tz_naive_fixture): tz = tz_naive_fixture # monotonic - idx1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz=tz) + idx1 = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz=tz) assert idx1.is_monotonic # non-monotonic - idx2 = pd.DatetimeIndex( + idx2 = DatetimeIndex( ["2011-01-01", pd.NaT, "2011-01-03", "2011-01-02", pd.NaT], tz=tz ) assert not idx2.is_monotonic @@ -927,7 +927,7 @@ def test_timedelta64_analytics(self): # index min/max dti = pd.date_range("2012-1-1", periods=3, freq="D") - td = Series(dti) - pd.Timestamp("20120101") + td = Series(dti) - Timestamp("20120101") result = td.idxmin() assert result == 0 @@ -958,11 +958,11 @@ def test_timedelta64_analytics(self): # max/min result = td.max() - expected = pd.Timedelta("2 days") + expected = Timedelta("2 days") assert result == expected result = td.min() - expected = pd.Timedelta("1 days") + expected = Timedelta("1 days") assert result == expected @pytest.mark.parametrize( @@ -1017,8 +1017,8 @@ class TestDatetime64SeriesReductions: "nat_ser", [ Series([pd.NaT, pd.NaT]), - Series([pd.NaT, pd.Timedelta("nat")]), - Series([pd.Timedelta("nat"), pd.Timedelta("nat")]), + Series([pd.NaT, Timedelta("nat")]), + Series([Timedelta("nat"), Timedelta("nat")]), ], ) def test_minmax_nat_series(self, nat_ser): @@ -1032,8 +1032,8 @@ def test_minmax_nat_series(self, nat_ser): "nat_df", [ DataFrame([pd.NaT, pd.NaT]), - DataFrame([pd.NaT, pd.Timedelta("nat")]), - DataFrame([pd.Timedelta("nat"), pd.Timedelta("nat")]), + DataFrame([pd.NaT, Timedelta("nat")]), + DataFrame([Timedelta("nat"), Timedelta("nat")]), ], ) def test_minmax_nat_dataframe(self, nat_df): @@ -1049,8 +1049,8 @@ def test_min_max(self): the_min = rng2.min() the_max = rng2.max() - assert isinstance(the_min, pd.Timestamp) - assert isinstance(the_max, pd.Timestamp) + assert isinstance(the_min, Timestamp) + assert isinstance(the_max, Timestamp) assert the_min == rng[0] assert the_max == rng[-1] @@ -1063,13 +1063,13 @@ def test_min_max_series(self): df = DataFrame({"TS": rng, "V": np.random.randn(len(rng)), "L": lvls}) result = df.TS.max() - exp = pd.Timestamp(df.TS.iat[-1]) - assert isinstance(result, pd.Timestamp) + exp = Timestamp(df.TS.iat[-1]) + assert isinstance(result, Timestamp) assert result == exp result = df.TS.min() - exp = pd.Timestamp(df.TS.iat[0]) - assert isinstance(result, pd.Timestamp) + exp = Timestamp(df.TS.iat[0]) + assert isinstance(result, Timestamp) assert result == exp diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 19e5a5dd7f5e7..7681807b60989 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -63,7 +63,7 @@ def test_custom_grouper(index): arr = [1] + [5] * 2592 idx = dti[0:-1:5] idx = idx.append(dti[-1:]) - idx = pd.DatetimeIndex(idx, freq="5T") + idx = DatetimeIndex(idx, freq="5T") expect = Series(arr, index=idx) # GH2763 - return in put dtype if we can @@ -440,7 +440,7 @@ def test_resample_how_method(): ) expected = Series( [11, np.NaN, np.NaN, np.NaN, np.NaN, 
np.NaN, 22], - index=pd.DatetimeIndex( + index=DatetimeIndex( [ Timestamp("2015-03-31 21:48:50"), Timestamp("2015-03-31 21:49:00"), @@ -722,7 +722,7 @@ def test_resample_single_group(): [30.1, 31.6], index=[Timestamp("20070915 15:30:00"), Timestamp("20070915 15:40:00")], ) - expected = Series([0.75], index=pd.DatetimeIndex([Timestamp("20070915")], freq="D")) + expected = Series([0.75], index=DatetimeIndex([Timestamp("20070915")], freq="D")) result = s.resample("D").apply(lambda x: np.std(x)) tm.assert_series_equal(result, expected) @@ -748,7 +748,7 @@ def test_resample_origin(): resampled = ts.resample("5min", origin="1999-12-31 23:57:00").mean() tm.assert_index_equal(resampled.index, exp_rng) - offset_timestamp = pd.Timestamp(0) + pd.Timedelta("2min") + offset_timestamp = Timestamp(0) + Timedelta("2min") resampled = ts.resample("5min", origin=offset_timestamp).mean() tm.assert_index_equal(resampled.index, exp_rng) @@ -879,14 +879,14 @@ def test_resample_origin_with_day_freq_on_dst(): def _create_series(values, timestamps, freq="D"): return Series( values, - index=pd.DatetimeIndex( + index=DatetimeIndex( [Timestamp(t, tz=tz) for t in timestamps], freq=freq, ambiguous=True ), ) # test classical behavior of origin in a DST context - start = pd.Timestamp("2013-11-02", tz=tz) - end = pd.Timestamp("2013-11-03 23:59", tz=tz) + start = Timestamp("2013-11-02", tz=tz) + end = Timestamp("2013-11-03 23:59", tz=tz) rng = pd.date_range(start, end, freq="1h") ts = Series(np.ones(len(rng)), index=rng) @@ -896,8 +896,8 @@ def _create_series(values, timestamps, freq="D"): tm.assert_series_equal(result, expected) # test complex behavior of origin/offset in a DST context - start = pd.Timestamp("2013-11-03", tz=tz) - end = pd.Timestamp("2013-11-03 23:59", tz=tz) + start = Timestamp("2013-11-03", tz=tz) + end = Timestamp("2013-11-03 23:59", tz=tz) rng = pd.date_range(start, end, freq="1h") ts = Series(np.ones(len(rng)), index=rng) @@ -1275,7 +1275,7 @@ def test_resample_timegrouper(): for dates in [dates1, dates2, dates3]: df = DataFrame(dict(A=dates, B=np.arange(len(dates)))) result = df.set_index("A").resample("M").count() - exp_idx = pd.DatetimeIndex( + exp_idx = DatetimeIndex( ["2014-07-31", "2014-08-31", "2014-09-30", "2014-10-31", "2014-11-30"], freq="M", name="A", @@ -1438,7 +1438,7 @@ def test_resample_across_dst(): def test_groupby_with_dst_time_change(): # GH 24972 - index = pd.DatetimeIndex( + index = DatetimeIndex( [1478064900001000000, 1480037118776792000], tz="UTC" ).tz_convert("America/Chicago") @@ -1448,7 +1448,7 @@ def test_groupby_with_dst_time_change(): "2016-11-02", "2016-11-24", freq="d", tz="America/Chicago" ) - index = pd.DatetimeIndex(expected_index_values) + index = DatetimeIndex(expected_index_values) expected = DataFrame([1.0] + ([np.nan] * 21) + [2.0], index=index) tm.assert_frame_equal(result, expected) @@ -1563,7 +1563,7 @@ def test_downsample_across_dst_weekly(): result = df.resample("1W").sum() expected = DataFrame( [23, 42], - index=pd.DatetimeIndex( + index=DatetimeIndex( ["2017-03-26", "2017-04-02"], tz="Europe/Amsterdam", freq="W" ), ) @@ -1592,7 +1592,7 @@ def test_downsample_dst_at_midnight(): dti = date_range("2018-11-03", periods=3).tz_localize( "America/Havana", ambiguous=True ) - dti = pd.DatetimeIndex(dti, freq="D") + dti = DatetimeIndex(dti, freq="D") expected = DataFrame([7.5, 28.0, 44.5], index=dti) tm.assert_frame_equal(result, expected) @@ -1714,8 +1714,8 @@ def test_get_timestamp_range_edges(first, last, freq, exp_first, exp_last): last = pd.Period(last) last = 
last.to_timestamp(last.freq) - exp_first = pd.Timestamp(exp_first, freq=freq) - exp_last = pd.Timestamp(exp_last, freq=freq) + exp_first = Timestamp(exp_first, freq=freq) + exp_last = Timestamp(exp_last, freq=freq) freq = pd.tseries.frequencies.to_offset(freq) result = _get_timestamp_range_edges(first, last, freq) diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index ca31ef684257d..15dd49f8bf182 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -155,7 +155,7 @@ def test_groupby_with_origin(): tm.assert_index_equal(count_ts.index, count_ts2.index) # test origin on 1970-01-01 00:00:00 - origin = pd.Timestamp(0) + origin = Timestamp(0) adjusted_grouper = pd.Grouper(freq=freq, origin=origin) adjusted_count_ts = ts.groupby(adjusted_grouper).agg("count") adjusted_count_ts = adjusted_count_ts[middle:end] @@ -163,7 +163,7 @@ def test_groupby_with_origin(): tm.assert_series_equal(adjusted_count_ts, adjusted_count_ts2) # test origin on 2049-10-18 20:00:00 - origin_future = pd.Timestamp(0) + pd.Timedelta("1399min") * 30_000 + origin_future = Timestamp(0) + pd.Timedelta("1399min") * 30_000 adjusted_grouper2 = pd.Grouper(freq=freq, origin=origin_future) adjusted2_count_ts = ts.groupby(adjusted_grouper2).agg("count") adjusted2_count_ts = adjusted2_count_ts[middle:end] diff --git a/pandas/tests/reshape/concat/test_append_common.py b/pandas/tests/reshape/concat/test_append_common.py index 395673e9a47ab..8b7fb69f7ee05 100644 --- a/pandas/tests/reshape/concat/test_append_common.py +++ b/pandas/tests/reshape/concat/test_append_common.py @@ -40,7 +40,7 @@ def setup_method(self, method): "bool": [True, False, True], "int64": [1, 2, 3], "float64": [1.1, np.nan, 3.3], - "category": pd.Categorical(["X", "Y", "Z"]), + "category": Categorical(["X", "Y", "Z"]), "object": ["a", "b", "c"], "datetime64[ns]": dt_data, "datetime64[ns, US/Eastern]": tz_data, @@ -80,8 +80,8 @@ def test_concatlike_same_dtypes(self): vals3 = vals1 if typ1 == "category": - exp_data = pd.Categorical(list(vals1) + list(vals2)) - exp_data3 = pd.Categorical(list(vals1) + list(vals2) + list(vals3)) + exp_data = Categorical(list(vals1) + list(vals2)) + exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3)) else: exp_data = vals1 + vals2 exp_data3 = vals1 + vals2 + vals3 @@ -637,16 +637,14 @@ def test_concat_categorical_multi_coercion(self): def test_concat_categorical_ordered(self): # GH 13524 - s1 = Series(pd.Categorical([1, 2, np.nan], ordered=True)) - s2 = Series(pd.Categorical([2, 1, 2], ordered=True)) + s1 = Series(Categorical([1, 2, np.nan], ordered=True)) + s2 = Series(Categorical([2, 1, 2], ordered=True)) - exp = Series(pd.Categorical([1, 2, np.nan, 2, 1, 2], ordered=True)) + exp = Series(Categorical([1, 2, np.nan, 2, 1, 2], ordered=True)) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(s1.append(s2, ignore_index=True), exp) - exp = Series( - pd.Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True) - ) + exp = Series(Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True)) tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp) tm.assert_series_equal(s1.append([s2, s1], ignore_index=True), exp) diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py index 90172abefb8c4..a1351ce782669 100644 --- a/pandas/tests/reshape/concat/test_concat.py +++ 
b/pandas/tests/reshape/concat/test_concat.py @@ -514,7 +514,7 @@ def test_duplicate_keys(keys): s2 = Series([10, 11, 12], name="d") result = concat([df, s1, s2], axis=1, keys=keys) expected_values = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] - expected_columns = pd.MultiIndex.from_tuples( + expected_columns = MultiIndex.from_tuples( [(keys[0], "a"), (keys[0], "b"), (keys[1], "c"), (keys[2], "d")] ) expected = DataFrame(expected_values, columns=expected_columns) diff --git a/pandas/tests/reshape/concat/test_datetimes.py b/pandas/tests/reshape/concat/test_datetimes.py index 8783f539faa65..a4d6b58307523 100644 --- a/pandas/tests/reshape/concat/test_datetimes.py +++ b/pandas/tests/reshape/concat/test_datetimes.py @@ -205,15 +205,15 @@ def test_concat_NaT_dataframes(self, tz): first = DataFrame([[pd.NaT], [pd.NaT]]) first = first.apply(lambda x: x.dt.tz_localize(tz)) second = DataFrame( - [[pd.Timestamp("2015/01/01", tz=tz)], [pd.Timestamp("2016/01/01", tz=tz)]], + [[Timestamp("2015/01/01", tz=tz)], [Timestamp("2016/01/01", tz=tz)]], index=[2, 3], ) expected = DataFrame( [ pd.NaT, pd.NaT, - pd.Timestamp("2015/01/01", tz=tz), - pd.Timestamp("2016/01/01", tz=tz), + Timestamp("2015/01/01", tz=tz), + Timestamp("2016/01/01", tz=tz), ] ) @@ -222,7 +222,7 @@ def test_concat_NaT_dataframes(self, tz): @pytest.mark.parametrize("tz1", [None, "UTC"]) @pytest.mark.parametrize("tz2", [None, "UTC"]) - @pytest.mark.parametrize("s", [pd.NaT, pd.Timestamp("20150101")]) + @pytest.mark.parametrize("s", [pd.NaT, Timestamp("20150101")]) def test_concat_NaT_dataframes_all_NaT_axis_0(self, tz1, tz2, s): # GH 12396 @@ -263,8 +263,8 @@ def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2): first = Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1) second = DataFrame( [ - [pd.Timestamp("2015/01/01", tz=tz2)], - [pd.Timestamp("2016/01/01", tz=tz2)], + [Timestamp("2015/01/01", tz=tz2)], + [Timestamp("2016/01/01", tz=tz2)], ], index=[2, 3], ) @@ -273,8 +273,8 @@ def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2): [ pd.NaT, pd.NaT, - pd.Timestamp("2015/01/01", tz=tz2), - pd.Timestamp("2016/01/01", tz=tz2), + Timestamp("2015/01/01", tz=tz2), + Timestamp("2016/01/01", tz=tz2), ] ) if tz1 != tz2: @@ -344,12 +344,12 @@ def test_concat_tz_series(self): def test_concat_tz_series_tzlocal(self): # see gh-13583 x = [ - pd.Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()), - pd.Timestamp("2011-02-01", tz=dateutil.tz.tzlocal()), + Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()), + Timestamp("2011-02-01", tz=dateutil.tz.tzlocal()), ] y = [ - pd.Timestamp("2012-01-01", tz=dateutil.tz.tzlocal()), - pd.Timestamp("2012-02-01", tz=dateutil.tz.tzlocal()), + Timestamp("2012-01-01", tz=dateutil.tz.tzlocal()), + Timestamp("2012-02-01", tz=dateutil.tz.tzlocal()), ] result = concat([Series(x), Series(y)], ignore_index=True) @@ -359,8 +359,8 @@ def test_concat_tz_series_tzlocal(self): def test_concat_tz_series_with_datetimelike(self): # see gh-12620: tz and timedelta x = [ - pd.Timestamp("2011-01-01", tz="US/Eastern"), - pd.Timestamp("2011-02-01", tz="US/Eastern"), + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-02-01", tz="US/Eastern"), ] y = [pd.Timedelta("1 day"), pd.Timedelta("2 day")] result = concat([Series(x), Series(y)], ignore_index=True) @@ -374,8 +374,8 @@ def test_concat_tz_series_with_datetimelike(self): def test_concat_tz_frame(self): df2 = DataFrame( dict( - A=pd.Timestamp("20130102", tz="US/Eastern"), - B=pd.Timestamp("20130603", tz="CET"), + A=Timestamp("20130102", tz="US/Eastern"), + 
B=Timestamp("20130603", tz="CET"), ), index=range(5), ) @@ -501,7 +501,7 @@ def test_concat_period_other_series(self): # non-period x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) - y = Series(pd.DatetimeIndex(["2015-11-01", "2015-12-01"])) + y = Series(DatetimeIndex(["2015-11-01", "2015-12-01"])) expected = Series([x[0], x[1], y[0], y[1]], dtype="object") result = concat([x, y], ignore_index=True) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/reshape/concat/test_index.py b/pandas/tests/reshape/concat/test_index.py index e283212b4e60c..3fc886893b55a 100644 --- a/pandas/tests/reshape/concat/test_index.py +++ b/pandas/tests/reshape/concat/test_index.py @@ -204,11 +204,11 @@ def test_concat_multiindex_with_keys(self): def test_concat_multiindex_with_none_in_index_names(self): # GH 15787 - index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None]) + index = MultiIndex.from_product([[1], range(5)], names=["level1", None]) df = DataFrame({"col": range(5)}, index=index, dtype=np.int32) result = concat([df, df], keys=[1, 2], names=["level2"]) - index = pd.MultiIndex.from_product( + index = MultiIndex.from_product( [[1, 2], [1], range(5)], names=["level2", "level1", None] ) expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32) @@ -219,7 +219,7 @@ def test_concat_multiindex_with_none_in_index_names(self): level1 = [1] * 7 no_name = list(range(5)) + list(range(2)) tuples = list(zip(level2, level1, no_name)) - index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None]) + index = MultiIndex.from_tuples(tuples, names=["level2", "level1", None]) expected = DataFrame({"col": no_name}, index=index, dtype=np.int32) tm.assert_frame_equal(result, expected) @@ -242,14 +242,14 @@ def test_concat_multiindex_dfs_with_deepcopy(self): # GH 9967 from copy import deepcopy - example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]]) + example_multiindex1 = MultiIndex.from_product([["a"], ["b"]]) example_dataframe1 = DataFrame([0], index=example_multiindex1) - example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]]) + example_multiindex2 = MultiIndex.from_product([["a"], ["c"]]) example_dataframe2 = DataFrame([1], index=example_multiindex2) example_dict = {"s1": example_dataframe1, "s2": example_dataframe2} - expected_index = pd.MultiIndex( + expected_index = MultiIndex( levels=[["s1", "s2"], ["a"], ["b", "c"]], codes=[[0, 1], [0, 0], [0, 1]], names=["testname", None, None], diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 5968fd1834f8c..7db92eb55fa0b 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -792,14 +792,14 @@ def test_join_inner_multiindex_deterministic_order(): # GH: 36910 left = DataFrame( data={"e": 5}, - index=pd.MultiIndex.from_tuples([(1, 2, 4)], names=("a", "b", "d")), + index=MultiIndex.from_tuples([(1, 2, 4)], names=("a", "b", "d")), ) right = DataFrame( - data={"f": 6}, index=pd.MultiIndex.from_tuples([(2, 3)], names=("b", "c")) + data={"f": 6}, index=MultiIndex.from_tuples([(2, 3)], names=("b", "c")) ) result = left.join(right, how="inner") expected = DataFrame( {"e": [5], "f": [6]}, - index=pd.MultiIndex.from_tuples([(2, 1, 4, 3)], names=("b", "a", "d", "c")), + index=MultiIndex.from_tuples([(2, 1, 4, 3)], names=("b", "a", "d", "c")), ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 
c4c9b0e516192..bb2860b88b288 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1334,15 +1334,15 @@ def test_merge_take_missing_values_from_index_of_other_dtype(self): left = DataFrame( { "a": [1, 2, 3], - "key": pd.Categorical(["a", "a", "b"], categories=list("abc")), + "key": Categorical(["a", "a", "b"], categories=list("abc")), } ) - right = DataFrame({"b": [1, 2, 3]}, index=pd.CategoricalIndex(["a", "b", "c"])) + right = DataFrame({"b": [1, 2, 3]}, index=CategoricalIndex(["a", "b", "c"])) result = left.merge(right, left_on="key", right_index=True, how="right") expected = DataFrame( { "a": [1, 2, 3, None], - "key": pd.Categorical(["a", "a", "b", "c"]), + "key": Categorical(["a", "a", "b", "c"]), "b": [1, 1, 2, 3], }, index=[0, 1, 2, np.nan], @@ -1687,7 +1687,7 @@ def tests_merge_categorical_unordered_equal(self): result = pd.merge(df1, df2, on=["Foo"]) expected = DataFrame( { - "Foo": pd.Categorical(["A", "B", "C"]), + "Foo": Categorical(["A", "B", "C"]), "Left": ["A0", "B0", "C0"], "Right": ["A1", "B1", "C1"], } diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py index 65673bdde4257..b1922241c7843 100644 --- a/pandas/tests/reshape/merge/test_multi.py +++ b/pandas/tests/reshape/merge/test_multi.py @@ -196,7 +196,7 @@ def test_merge_multiple_cols_with_mixed_cols_index(self): # GH29522 s = pd.Series( range(6), - pd.MultiIndex.from_product([["A", "B"], [1, 2, 3]], names=["lev1", "lev2"]), + MultiIndex.from_product([["A", "B"], [1, 2, 3]], names=["lev1", "lev2"]), name="Amount", ) df = DataFrame({"lev1": list("AAABBB"), "lev2": [1, 2, 3, 1, 2, 3], "col": 0}) @@ -794,7 +794,7 @@ def test_merge_datetime_index(self, box): tm.assert_frame_equal(result, expected) def test_single_common_level(self): - index_left = pd.MultiIndex.from_tuples( + index_left = MultiIndex.from_tuples( [("K0", "X0"), ("K0", "X1"), ("K1", "X2")], names=["key", "X"] ) @@ -802,7 +802,7 @@ def test_single_common_level(self): {"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]}, index=index_left ) - index_right = pd.MultiIndex.from_tuples( + index_right = MultiIndex.from_tuples( [("K0", "Y0"), ("K1", "Y1"), ("K2", "Y2"), ("K2", "Y3")], names=["key", "Y"] ) @@ -822,8 +822,8 @@ def test_join_multi_wrong_order(self): # GH 25760 # GH 28956 - midx1 = pd.MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) - midx3 = pd.MultiIndex.from_tuples([(4, 1), (3, 2), (3, 1)], names=["b", "a"]) + midx1 = MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) + midx3 = MultiIndex.from_tuples([(4, 1), (3, 2), (3, 1)], names=["b", "a"]) left = DataFrame(index=midx1, data={"x": [10, 20, 30, 40]}) right = DataFrame(index=midx3, data={"y": ["foo", "bar", "fing"]}) diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 99beff39e8e09..1f39302845ae9 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -1049,7 +1049,7 @@ def test_col_substring_of_stubname(self): "PA1": {0: 0.77, 1: 0.64, 2: 0.52, 3: 0.98, 4: 0.67}, "PA3": {0: 0.34, 1: 0.70, 2: 0.52, 3: 0.98, 4: 0.67}, } - wide_df = pd.DataFrame.from_dict(wide_data) + wide_df = DataFrame.from_dict(wide_data) expected = pd.wide_to_long( wide_df, stubnames=["PA"], i=["node_id", "A"], j="time" ) diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 642e6a691463e..adab64577ee7a 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -197,7 +197,7 @@ def 
test_pivot_table_categorical(self): df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) result = pd.pivot_table(df, values="values", index=["A", "B"], dropna=True) - exp_index = pd.MultiIndex.from_arrays([cat1, cat2], names=["A", "B"]) + exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"]) expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index) tm.assert_frame_equal(result, expected) @@ -233,7 +233,7 @@ def test_pivot_with_non_observable_dropna(self, dropna): # gh-21133 df = DataFrame( { - "A": pd.Categorical( + "A": Categorical( [np.nan, "low", "high", "low", "high"], categories=["low", "high"], ordered=True, @@ -246,7 +246,7 @@ def test_pivot_with_non_observable_dropna(self, dropna): expected = DataFrame( {"B": [2, 3]}, index=Index( - pd.Categorical.from_codes( + Categorical.from_codes( [0, 1], categories=["low", "high"], ordered=True ), name="A", @@ -258,7 +258,7 @@ def test_pivot_with_non_observable_dropna(self, dropna): # gh-21378 df = DataFrame( { - "A": pd.Categorical( + "A": Categorical( ["left", "low", "high", "low", "high"], categories=["low", "high", "left"], ordered=True, @@ -271,7 +271,7 @@ def test_pivot_with_non_observable_dropna(self, dropna): expected = DataFrame( {"B": [2, 3, 0]}, index=Index( - pd.Categorical.from_codes( + Categorical.from_codes( [0, 1, 2], categories=["low", "high", "left"], ordered=True ), name="A", @@ -294,7 +294,7 @@ def test_pivot_with_interval_index_margins(self): { "A": np.arange(4, 0, -1, dtype=np.intp), "B": ["a", "b", "a", "b"], - "C": pd.Categorical(ordered_cat, ordered=True).sort_values( + "C": Categorical(ordered_cat, ordered=True).sort_values( ascending=False ), } @@ -400,7 +400,7 @@ def test_pivot_no_values(self): df = DataFrame({"A": [1, 2, 3, 4, 5]}, index=idx) res = df.pivot_table(index=df.index.month, columns=df.index.day) - exp_columns = pd.MultiIndex.from_tuples([("A", 1), ("A", 2)]) + exp_columns = MultiIndex.from_tuples([("A", 1), ("A", 2)]) exp = DataFrame([[2.5, 4.0], [2.0, np.nan]], index=[1, 2], columns=exp_columns) tm.assert_frame_equal(res, exp) @@ -414,7 +414,7 @@ def test_pivot_no_values(self): res = df.pivot_table( index=df.index.month, columns=pd.Grouper(key="dt", freq="M") ) - exp_columns = pd.MultiIndex.from_tuples([("A", pd.Timestamp("2011-01-31"))]) + exp_columns = MultiIndex.from_tuples([("A", pd.Timestamp("2011-01-31"))]) exp_columns.names = [None, "dt"] exp = DataFrame([3.25, 2.0], index=[1, 2], columns=exp_columns) tm.assert_frame_equal(res, exp) @@ -544,7 +544,7 @@ def test_pivot_with_tz(self, method): exp_col2 = pd.DatetimeIndex( ["2014/01/01 09:00", "2014/01/02 09:00"] * 2, name="dt2", tz="Asia/Tokyo" ) - exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2]) + exp_col = MultiIndex.from_arrays([exp_col1, exp_col2]) expected = DataFrame( [[0, 2, 0, 2], [1, 3, 1, 3]], index=pd.DatetimeIndex( @@ -653,7 +653,7 @@ def test_pivot_periods(self, method): exp_col1 = Index(["data1", "data1", "data2", "data2"]) exp_col2 = pd.PeriodIndex(["2013-01", "2013-02"] * 2, name="p2", freq="M") - exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2]) + exp_col = MultiIndex.from_arrays([exp_col1, exp_col2]) expected = DataFrame( [[0, 2, 0, 2], [1, 3, 1, 3]], index=pd.PeriodIndex(["2013-01-01", "2013-01-02"], name="p1", freq="D"), @@ -1705,7 +1705,7 @@ def test_pivot_table_margins_name_with_aggfunc_list(self): ("max", "cost", "T"), ("max", "cost", margins_name), ] - cols = pd.MultiIndex.from_tuples(tups, names=[None, None, "day"]) + cols = MultiIndex.from_tuples(tups, names=[None, None, "day"]) 
expected = DataFrame(table.values, index=ix, columns=cols) tm.assert_frame_equal(table, expected) @@ -1762,8 +1762,8 @@ def test_pivot_with_categorical(self, observed, ordered): col = [np.nan, "A", "B", np.nan, "A"] df = DataFrame( { - "In": pd.Categorical(idx, categories=["low", "high"], ordered=ordered), - "Col": pd.Categorical(col, categories=["A", "B"], ordered=ordered), + "In": Categorical(idx, categories=["low", "high"], ordered=ordered), + "Col": Categorical(col, categories=["A", "B"], ordered=ordered), "Val": range(1, 6), } ) @@ -1776,9 +1776,7 @@ def test_pivot_with_categorical(self, observed, ordered): expected = DataFrame(data=[[2.0, np.nan], [np.nan, 3.0]], columns=expected_cols) expected.index = Index( - pd.Categorical( - ["low", "high"], categories=["low", "high"], ordered=ordered - ), + Categorical(["low", "high"], categories=["low", "high"], ordered=ordered), name="In", ) @@ -2013,7 +2011,7 @@ def ret_none(x): ) data = [[3, 1, np.nan, np.nan, 1, 1], [13, 6, np.nan, np.nan, 1, 1]] - col = pd.MultiIndex.from_product( + col = MultiIndex.from_product( [["ret_sum", "ret_none", "ret_one"], ["apple", "peach"]], names=[None, "fruit"], ) @@ -2143,7 +2141,7 @@ def test_pivot_index_none(self): # omit values result = frame.pivot(columns="columns") - expected.columns = pd.MultiIndex.from_tuples( + expected.columns = MultiIndex.from_tuples( [("values", "One"), ("values", "Two")], names=[None, "columns"] ) expected.index.name = "index" diff --git a/pandas/tests/reshape/test_union_categoricals.py b/pandas/tests/reshape/test_union_categoricals.py index f6a4f8c0cf601..b44f4844b8e2d 100644 --- a/pandas/tests/reshape/test_union_categoricals.py +++ b/pandas/tests/reshape/test_union_categoricals.py @@ -72,13 +72,13 @@ def test_union_categorical(self): def test_union_categoricals_nan(self): # GH 13759 res = union_categoricals( - [pd.Categorical([1, 2, np.nan]), pd.Categorical([3, 2, np.nan])] + [Categorical([1, 2, np.nan]), Categorical([3, 2, np.nan])] ) exp = Categorical([1, 2, np.nan, 3, 2, np.nan]) tm.assert_categorical_equal(res, exp) res = union_categoricals( - [pd.Categorical(["A", "B"]), pd.Categorical(["B", "B", np.nan])] + [Categorical(["A", "B"]), Categorical(["B", "B", np.nan])] ) exp = Categorical(["A", "B", "B", "B", np.nan]) tm.assert_categorical_equal(res, exp) @@ -86,7 +86,7 @@ def test_union_categoricals_nan(self): val1 = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-03-01"), pd.NaT] val2 = [pd.NaT, pd.Timestamp("2011-01-01"), pd.Timestamp("2011-02-01")] - res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)]) + res = union_categoricals([Categorical(val1), Categorical(val2)]) exp = Categorical( val1 + val2, categories=[ @@ -100,22 +100,22 @@ def test_union_categoricals_nan(self): # all NaN res = union_categoricals( [ - pd.Categorical(np.array([np.nan, np.nan], dtype=object)), - pd.Categorical(["X"]), + Categorical(np.array([np.nan, np.nan], dtype=object)), + Categorical(["X"]), ] ) exp = Categorical([np.nan, np.nan, "X"]) tm.assert_categorical_equal(res, exp) res = union_categoricals( - [pd.Categorical([np.nan, np.nan]), pd.Categorical([np.nan, np.nan])] + [Categorical([np.nan, np.nan]), Categorical([np.nan, np.nan])] ) exp = Categorical([np.nan, np.nan, np.nan, np.nan]) tm.assert_categorical_equal(res, exp) def test_union_categoricals_empty(self): # GH 13759 - res = union_categoricals([pd.Categorical([]), pd.Categorical([])]) + res = union_categoricals([Categorical([]), Categorical([])]) exp = Categorical([]) tm.assert_categorical_equal(res, exp) diff --git 
a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 5006e16b6a7e0..f150e5e5b18b2 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -655,7 +655,7 @@ def test_to_timestamp_business_end(self): per = pd.Period("1990-01-05", "B") # Friday result = per.to_timestamp("B", how="E") - expected = pd.Timestamp("1990-01-06") - pd.Timedelta(nanoseconds=1) + expected = Timestamp("1990-01-06") - Timedelta(nanoseconds=1) assert result == expected @pytest.mark.parametrize( @@ -866,7 +866,7 @@ def test_end_time_business_friday(self): per = Period("1990-01-05", "B") result = per.end_time - expected = pd.Timestamp("1990-01-06") - pd.Timedelta(nanoseconds=1) + expected = Timestamp("1990-01-06") - Timedelta(nanoseconds=1) assert result == expected def test_anchor_week_end_time(self): diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py index d4d7e4b85268f..8ec8f1e0457fb 100644 --- a/pandas/tests/scalar/timedelta/test_arithmetic.py +++ b/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -927,7 +927,7 @@ def test_compare_timedelta_ndarray(self): def test_compare_td64_ndarray(self): # GG#33441 arr = np.arange(5).astype("timedelta64[ns]") - td = pd.Timedelta(arr[1]) + td = Timedelta(arr[1]) expected = np.array([False, True, False, False, False], dtype=bool) diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index 6aee8b3ab34fa..c25b8936c1b29 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -264,8 +264,8 @@ def test_getitem_setitem_datetimeindex(): # see GH#18376, GH#18162 ts[(ts.index >= lb) & (ts.index <= rb)] - lb = pd.Timestamp(datetime(1990, 1, 1, 4)).tz_localize(rng.tzinfo) - rb = pd.Timestamp(datetime(1990, 1, 1, 7)).tz_localize(rng.tzinfo) + lb = Timestamp(datetime(1990, 1, 1, 4)).tz_localize(rng.tzinfo) + rb = Timestamp(datetime(1990, 1, 1, 7)).tz_localize(rng.tzinfo) result = ts[(ts.index >= lb) & (ts.index <= rb)] expected = ts[4:8] tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 214694443ba2a..88087110fc221 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -133,10 +133,10 @@ def test_getitem_fancy(string_series, object_series): def test_type_promotion(): # GH12599 s = Series(dtype=object) - s["a"] = pd.Timestamp("2016-01-01") + s["a"] = Timestamp("2016-01-01") s["b"] = 3.0 s["c"] = "foo" - expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"], index=["a", "b", "c"]) + expected = Series([Timestamp("2016-01-01"), 3.0, "foo"], index=["a", "b", "c"]) tm.assert_series_equal(s, expected) @@ -181,13 +181,13 @@ def test_series_box_timestamp(): rng = pd.date_range("20090415", "20090519", freq="B") ser = Series(rng) - assert isinstance(ser[5], pd.Timestamp) + assert isinstance(ser[5], Timestamp) rng = pd.date_range("20090415", "20090519", freq="B") ser = Series(rng, index=rng) - assert isinstance(ser[5], pd.Timestamp) + assert isinstance(ser[5], Timestamp) - assert isinstance(ser.iat[5], pd.Timestamp) + assert isinstance(ser.iat[5], Timestamp) def test_series_box_timedelta(): @@ -354,27 +354,27 @@ def test_setitem_with_tz(tz): # scalar s = orig.copy() - s[1] = pd.Timestamp("2011-01-01", tz=tz) + s[1] = Timestamp("2011-01-01", tz=tz) exp = Series( [ - 
pd.Timestamp("2016-01-01 00:00", tz=tz), - pd.Timestamp("2011-01-01 00:00", tz=tz), - pd.Timestamp("2016-01-01 02:00", tz=tz), + Timestamp("2016-01-01 00:00", tz=tz), + Timestamp("2011-01-01 00:00", tz=tz), + Timestamp("2016-01-01 02:00", tz=tz), ] ) tm.assert_series_equal(s, exp) s = orig.copy() - s.loc[1] = pd.Timestamp("2011-01-01", tz=tz) + s.loc[1] = Timestamp("2011-01-01", tz=tz) tm.assert_series_equal(s, exp) s = orig.copy() - s.iloc[1] = pd.Timestamp("2011-01-01", tz=tz) + s.iloc[1] = Timestamp("2011-01-01", tz=tz) tm.assert_series_equal(s, exp) # vector vals = Series( - [pd.Timestamp("2011-01-01", tz=tz), pd.Timestamp("2012-01-01", tz=tz)], + [Timestamp("2011-01-01", tz=tz), Timestamp("2012-01-01", tz=tz)], index=[1, 2], ) assert vals.dtype == f"datetime64[ns, {tz}]" @@ -382,9 +382,9 @@ def test_setitem_with_tz(tz): s[[1, 2]] = vals exp = Series( [ - pd.Timestamp("2016-01-01 00:00", tz=tz), - pd.Timestamp("2011-01-01 00:00", tz=tz), - pd.Timestamp("2012-01-01 00:00", tz=tz), + Timestamp("2016-01-01 00:00", tz=tz), + Timestamp("2011-01-01 00:00", tz=tz), + Timestamp("2012-01-01 00:00", tz=tz), ] ) tm.assert_series_equal(s, exp) @@ -406,27 +406,27 @@ def test_setitem_with_tz_dst(): # scalar s = orig.copy() - s[1] = pd.Timestamp("2011-01-01", tz=tz) + s[1] = Timestamp("2011-01-01", tz=tz) exp = Series( [ - pd.Timestamp("2016-11-06 00:00-04:00", tz=tz), - pd.Timestamp("2011-01-01 00:00-05:00", tz=tz), - pd.Timestamp("2016-11-06 01:00-05:00", tz=tz), + Timestamp("2016-11-06 00:00-04:00", tz=tz), + Timestamp("2011-01-01 00:00-05:00", tz=tz), + Timestamp("2016-11-06 01:00-05:00", tz=tz), ] ) tm.assert_series_equal(s, exp) s = orig.copy() - s.loc[1] = pd.Timestamp("2011-01-01", tz=tz) + s.loc[1] = Timestamp("2011-01-01", tz=tz) tm.assert_series_equal(s, exp) s = orig.copy() - s.iloc[1] = pd.Timestamp("2011-01-01", tz=tz) + s.iloc[1] = Timestamp("2011-01-01", tz=tz) tm.assert_series_equal(s, exp) # vector vals = Series( - [pd.Timestamp("2011-01-01", tz=tz), pd.Timestamp("2012-01-01", tz=tz)], + [Timestamp("2011-01-01", tz=tz), Timestamp("2012-01-01", tz=tz)], index=[1, 2], ) assert vals.dtype == f"datetime64[ns, {tz}]" @@ -434,9 +434,9 @@ def test_setitem_with_tz_dst(): s[[1, 2]] = vals exp = Series( [ - pd.Timestamp("2016-11-06 00:00", tz=tz), - pd.Timestamp("2011-01-01 00:00", tz=tz), - pd.Timestamp("2012-01-01 00:00", tz=tz), + Timestamp("2016-11-06 00:00", tz=tz), + Timestamp("2011-01-01 00:00", tz=tz), + Timestamp("2012-01-01 00:00", tz=tz), ] ) tm.assert_series_equal(s, exp) @@ -568,7 +568,7 @@ def test_timedelta_assignment(): s = Series(10 * [np.timedelta64(10, "m")]) s.loc[[1, 2, 3]] = np.timedelta64(20, "m") expected = Series(10 * [np.timedelta64(10, "m")]) - expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, "m")) + expected.loc[[1, 2, 3]] = Timedelta(np.timedelta64(20, "m")) tm.assert_series_equal(s, expected) @@ -637,9 +637,9 @@ def test_td64_series_assign_nat(nat_val, should_cast): @pytest.mark.parametrize( "td", [ - pd.Timedelta("9 days"), - pd.Timedelta("9 days").to_timedelta64(), - pd.Timedelta("9 days").to_pytimedelta(), + Timedelta("9 days"), + Timedelta("9 days").to_timedelta64(), + Timedelta("9 days").to_pytimedelta(), ], ) def test_append_timedelta_does_not_cast(td): @@ -649,12 +649,12 @@ def test_append_timedelta_does_not_cast(td): ser = Series(["x"]) ser["td"] = td tm.assert_series_equal(ser, expected) - assert isinstance(ser["td"], pd.Timedelta) + assert isinstance(ser["td"], Timedelta) ser = Series(["x"]) - ser.loc["td"] = pd.Timedelta("9 days") + 
ser.loc["td"] = Timedelta("9 days") tm.assert_series_equal(ser, expected) - assert isinstance(ser["td"], pd.Timedelta) + assert isinstance(ser["td"], Timedelta) def test_underlying_data_conversion(): diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py index 404136bdfa2db..27bbb47e1d0d1 100644 --- a/pandas/tests/series/indexing/test_where.py +++ b/pandas/tests/series/indexing/test_where.py @@ -418,7 +418,7 @@ def test_where_datetime_conversion(): # GH 15701 timestamps = ["2016-12-31 12:00:04+00:00", "2016-12-31 12:00:04.010000+00:00"] - s = Series([pd.Timestamp(t) for t in timestamps]) + s = Series([Timestamp(t) for t in timestamps]) rs = s.where(Series([False, True])) expected = Series([pd.NaT, s[1]]) tm.assert_series_equal(rs, expected) diff --git a/pandas/tests/series/methods/test_quantile.py b/pandas/tests/series/methods/test_quantile.py index 964d62602edaa..1d3e91d07afe3 100644 --- a/pandas/tests/series/methods/test_quantile.py +++ b/pandas/tests/series/methods/test_quantile.py @@ -121,27 +121,27 @@ def test_quantile_nan(self): "case", [ [ - pd.Timestamp("2011-01-01"), - pd.Timestamp("2011-01-02"), - pd.Timestamp("2011-01-03"), + Timestamp("2011-01-01"), + Timestamp("2011-01-02"), + Timestamp("2011-01-03"), ], [ - pd.Timestamp("2011-01-01", tz="US/Eastern"), - pd.Timestamp("2011-01-02", tz="US/Eastern"), - pd.Timestamp("2011-01-03", tz="US/Eastern"), + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-03", tz="US/Eastern"), ], [pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")], # NaT [ - pd.Timestamp("2011-01-01"), - pd.Timestamp("2011-01-02"), - pd.Timestamp("2011-01-03"), + Timestamp("2011-01-01"), + Timestamp("2011-01-02"), + Timestamp("2011-01-03"), pd.NaT, ], [ - pd.Timestamp("2011-01-01", tz="US/Eastern"), - pd.Timestamp("2011-01-02", tz="US/Eastern"), - pd.Timestamp("2011-01-03", tz="US/Eastern"), + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-03", tz="US/Eastern"), pd.NaT, ], [ diff --git a/pandas/tests/series/methods/test_unstack.py b/pandas/tests/series/methods/test_unstack.py index 38955ea7f06c4..ded4500ba478a 100644 --- a/pandas/tests/series/methods/test_unstack.py +++ b/pandas/tests/series/methods/test_unstack.py @@ -40,7 +40,7 @@ def test_unstack(): tm.assert_frame_equal(unstacked, expected) # GH5873 - idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]]) + idx = MultiIndex.from_arrays([[101, 102], [3.5, np.nan]]) ts = Series([1, 2], index=idx) left = ts.unstack() right = DataFrame( @@ -48,7 +48,7 @@ def test_unstack(): ) tm.assert_frame_equal(left, right) - idx = pd.MultiIndex.from_arrays( + idx = MultiIndex.from_arrays( [ ["cat", "cat", "cat", "dog", "dog"], ["a", "a", "b", "a", "b"], @@ -61,13 +61,13 @@ def test_unstack(): columns=["cat", "dog"], ) tpls = [("a", 1), ("a", 2), ("b", np.nan), ("b", 1)] - right.index = pd.MultiIndex.from_tuples(tpls) + right.index = MultiIndex.from_tuples(tpls) tm.assert_frame_equal(ts.unstack(level=0), right) def test_unstack_tuplename_in_multiindex(): # GH 19966 - idx = pd.MultiIndex.from_product( + idx = MultiIndex.from_product( [["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")] ) ser = Series(1, index=idx) @@ -75,7 +75,7 @@ def test_unstack_tuplename_in_multiindex(): expected = DataFrame( [[1, 1, 1], [1, 1, 1], [1, 1, 1]], - columns=pd.MultiIndex.from_tuples([("a",), ("b",), ("c",)], names=[("A", "a")]), + 
columns=MultiIndex.from_tuples([("a",), ("b",), ("c",)], names=[("A", "a")]), index=pd.Index([1, 2, 3], name=("B", "b")), ) tm.assert_frame_equal(result, expected) @@ -87,16 +87,14 @@ def test_unstack_tuplename_in_multiindex(): ( ("A", "a"), [[1, 1], [1, 1], [1, 1], [1, 1]], - pd.MultiIndex.from_tuples( - [(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"] - ), - pd.MultiIndex.from_tuples([("a",), ("b",)], names=[("A", "a")]), + MultiIndex.from_tuples([(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]), + MultiIndex.from_tuples([("a",), ("b",)], names=[("A", "a")]), ), ( (("A", "a"), "B"), [[1, 1, 1, 1], [1, 1, 1, 1]], pd.Index([3, 4], name="C"), - pd.MultiIndex.from_tuples( + MultiIndex.from_tuples( [("a", 1), ("a", 2), ("b", 1), ("b", 2)], names=[("A", "a"), "B"] ), ), @@ -106,7 +104,7 @@ def test_unstack_mixed_type_name_in_multiindex( unstack_idx, expected_values, expected_index, expected_columns ): # GH 19966 - idx = pd.MultiIndex.from_product( + idx = MultiIndex.from_product( [["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"] ) ser = Series(1, index=idx) diff --git a/pandas/tests/series/methods/test_value_counts.py b/pandas/tests/series/methods/test_value_counts.py index 37da31fb2329a..e4fdfb2838b70 100644 --- a/pandas/tests/series/methods/test_value_counts.py +++ b/pandas/tests/series/methods/test_value_counts.py @@ -85,15 +85,15 @@ def test_value_counts_period(self): def test_value_counts_categorical_ordered(self): # most dtypes are tested in tests/base - values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=True) + values = Categorical([1, 2, 3, 1, 1, 3], ordered=True) - exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3], ordered=True) + exp_idx = CategoricalIndex([1, 3, 2], categories=[1, 2, 3], ordered=True) exp = Series([3, 2, 1], index=exp_idx, name="xxx") ser = Series(values, name="xxx") tm.assert_series_equal(ser.value_counts(), exp) # check CategoricalIndex outputs the same result - idx = pd.CategoricalIndex(values, name="xxx") + idx = CategoricalIndex(values, name="xxx") tm.assert_series_equal(idx.value_counts(), exp) # normalize @@ -102,15 +102,15 @@ def test_value_counts_categorical_ordered(self): tm.assert_series_equal(idx.value_counts(normalize=True), exp) def test_value_counts_categorical_not_ordered(self): - values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=False) + values = Categorical([1, 2, 3, 1, 1, 3], ordered=False) - exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3], ordered=False) + exp_idx = CategoricalIndex([1, 3, 2], categories=[1, 2, 3], ordered=False) exp = Series([3, 2, 1], index=exp_idx, name="xxx") ser = Series(values, name="xxx") tm.assert_series_equal(ser.value_counts(), exp) # check CategoricalIndex outputs the same result - idx = pd.CategoricalIndex(values, name="xxx") + idx = CategoricalIndex(values, name="xxx") tm.assert_series_equal(idx.value_counts(), exp) # normalize diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index bb091ba1beb2d..5c4118bc40f4d 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -71,7 +71,7 @@ def test_empty_constructor(self, constructor, check_index_type): def test_invalid_dtype(self): # GH15520 msg = "not understood" - invalid_list = [pd.Timestamp, "pd.Timestamp", list] + invalid_list = [Timestamp, "Timestamp", list] for dtype in invalid_list: with pytest.raises(TypeError, match=msg): Series([], name="time", dtype=dtype) @@ -310,17 +310,17 @@ def test_constructor_map(self): 
tm.assert_series_equal(result, exp) def test_constructor_categorical(self): - cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True) + cat = Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True) res = Series(cat) tm.assert_categorical_equal(res.values, cat) # can cast to a new dtype - result = Series(pd.Categorical([1, 2, 3]), dtype="int64") + result = Series(Categorical([1, 2, 3]), dtype="int64") expected = Series([1, 2, 3], dtype="int64") tm.assert_series_equal(result, expected) # GH12574 - cat = Series(pd.Categorical([1, 2, 3]), dtype="category") + cat = Series(Categorical([1, 2, 3]), dtype="category") assert is_categorical_dtype(cat) assert is_categorical_dtype(cat.dtype) s = Series([1, 2, 3], dtype="category") @@ -453,7 +453,7 @@ def test_categorical_sideeffects_free(self): def test_unordered_compare_equal(self): left = Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"])) - right = Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"])) + right = Series(Categorical(["a", "b", np.nan], categories=["a", "b"])) tm.assert_series_equal(left, right) def test_constructor_maskedarray(self): @@ -557,7 +557,7 @@ def test_constructor_default_index(self): [1, 2, 3], (1, 2, 3), list(range(3)), - pd.Categorical(["a", "b", "a"]), + Categorical(["a", "b", "a"]), (i for i in range(3)), map(lambda x: x, range(3)), ], @@ -940,8 +940,8 @@ def test_constructor_with_datetime_tz(self): # inference s = Series( [ - pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), - pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"), + Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), + Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"), ] ) assert s.dtype == "datetime64[ns, US/Pacific]" @@ -949,8 +949,8 @@ def test_constructor_with_datetime_tz(self): s = Series( [ - pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), - pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"), + Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), + Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"), ] ) assert s.dtype == "object" @@ -979,7 +979,7 @@ def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit): def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg): # GH 17415: With naive string result = Series([arg], dtype="datetime64[ns, CET]") - expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET") + expected = Series(Timestamp(arg)).dt.tz_localize("CET") tm.assert_series_equal(result, expected) def test_constructor_datetime64_bigendian(self): diff --git a/pandas/tests/series/test_dt_accessor.py b/pandas/tests/series/test_dt_accessor.py index 2f30c0621cc06..7a84f642aebc2 100644 --- a/pandas/tests/series/test_dt_accessor.py +++ b/pandas/tests/series/test_dt_accessor.py @@ -633,7 +633,7 @@ def test_dt_accessor_updates_on_inplace(self): def test_date_tz(self): # GH11757 - rng = pd.DatetimeIndex( + rng = DatetimeIndex( ["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"], tz="US/Eastern", ) @@ -646,7 +646,7 @@ def test_dt_timetz_accessor(self, tz_naive_fixture): # GH21358 tz = maybe_get_tz(tz_naive_fixture) - dtindex = pd.DatetimeIndex( + dtindex = DatetimeIndex( ["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"], tz=tz ) s = Series(dtindex) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 6f5345f802a7c..189c792ac228b 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -412,7 +412,7 @@ def test_subsets_multiindex_dtype(self): # GH 20757 
data = [["x", 1]] columns = [("a", "b", np.nan), ("a", "c", 0.0)] - df = DataFrame(data, columns=pd.MultiIndex.from_tuples(columns)) + df = DataFrame(data, columns=MultiIndex.from_tuples(columns)) expected = df.dtypes.a.b result = df.a.b.dtypes tm.assert_series_equal(result, expected) diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 13c8987c66977..ebe118252c8cf 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -272,7 +272,7 @@ def test_to_datetime_format_weeks(self, cache): [ "%Y-%m-%d %H:%M:%S %Z", ["2010-01-01 12:00:00 UTC"] * 2, - [pd.Timestamp("2010-01-01 12:00:00", tz="UTC")] * 2, + [Timestamp("2010-01-01 12:00:00", tz="UTC")] * 2, ], [ "%Y-%m-%d %H:%M:%S %Z", @@ -282,37 +282,37 @@ def test_to_datetime_format_weeks(self, cache): "2010-01-01 12:00:00 US/Pacific", ], [ - pd.Timestamp("2010-01-01 12:00:00", tz="UTC"), - pd.Timestamp("2010-01-01 12:00:00", tz="GMT"), - pd.Timestamp("2010-01-01 12:00:00", tz="US/Pacific"), + Timestamp("2010-01-01 12:00:00", tz="UTC"), + Timestamp("2010-01-01 12:00:00", tz="GMT"), + Timestamp("2010-01-01 12:00:00", tz="US/Pacific"), ], ], [ "%Y-%m-%d %H:%M:%S%z", ["2010-01-01 12:00:00+0100"] * 2, - [pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2, + [Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2, ], [ "%Y-%m-%d %H:%M:%S %z", ["2010-01-01 12:00:00 +0100"] * 2, - [pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2, + [Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2, ], [ "%Y-%m-%d %H:%M:%S %z", ["2010-01-01 12:00:00 +0100", "2010-01-01 12:00:00 -0100"], [ - pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60)), - pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(-60)), + Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60)), + Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(-60)), ], ], [ "%Y-%m-%d %H:%M:%S %z", ["2010-01-01 12:00:00 Z", "2010-01-01 12:00:00 Z"], [ - pd.Timestamp( + Timestamp( "2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0) ), # pytz coerces to UTC - pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)), + Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)), ], ], ], @@ -340,7 +340,7 @@ def test_to_datetime_parse_tzname_or_tzoffset_different_tz_to_utc(self): fmt = "%Y-%m-%d %H:%M:%S %z" result = pd.to_datetime(dates, format=fmt, utc=True) - expected = pd.DatetimeIndex(expected_dates) + expected = DatetimeIndex(expected_dates) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( @@ -359,7 +359,7 @@ def test_to_datetime_parse_timezone_keeps_name(self): fmt = "%Y-%m-%d %H:%M:%S %z" arg = Index(["2010-01-01 12:00:00 Z"], name="foo") result = pd.to_datetime(arg, format=fmt) - expected = pd.DatetimeIndex(["2010-01-01 12:00:00"], tz="UTC", name="foo") + expected = DatetimeIndex(["2010-01-01 12:00:00"], tz="UTC", name="foo") tm.assert_index_equal(result, expected) @@ -528,8 +528,8 @@ def test_to_datetime_today(self): pdtoday = pd.to_datetime("today") pdtoday2 = pd.to_datetime(["today"])[0] - tstoday = pd.Timestamp("today") - tstoday2 = pd.Timestamp.today() + tstoday = Timestamp("today") + tstoday2 = Timestamp.today() # These should all be equal with infinite perf; this gives # a generous margin of 10 seconds @@ -590,7 +590,7 @@ def test_to_datetime_array_of_dt64s(self, cache, unit): # an array that is equal to Timestamp() parsing tm.assert_index_equal( pd.to_datetime(dts, cache=cache), - 
pd.DatetimeIndex([Timestamp(x).asm8 for x in dts]), + DatetimeIndex([Timestamp(x).asm8 for x in dts]), ) # A list of datetimes where the last one is out of bounds @@ -602,7 +602,7 @@ def test_to_datetime_array_of_dt64s(self, cache, unit): tm.assert_index_equal( pd.to_datetime(dts_with_oob, errors="coerce", cache=cache), - pd.DatetimeIndex( + DatetimeIndex( [Timestamp(dts_with_oob[0]).asm8, Timestamp(dts_with_oob[1]).asm8] * 30 + [pd.NaT], ), @@ -622,8 +622,8 @@ def test_to_datetime_tz(self, cache): # xref 8260 # uniform returns a DatetimeIndex arr = [ - pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), - pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"), + Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), + Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"), ] result = pd.to_datetime(arr, cache=cache) expected = DatetimeIndex( @@ -633,8 +633,8 @@ def test_to_datetime_tz(self, cache): # mixed tzs will raise arr = [ - pd.Timestamp("2013-01-01 13:00:00", tz="US/Pacific"), - pd.Timestamp("2013-01-02 14:00:00", tz="US/Eastern"), + Timestamp("2013-01-01 13:00:00", tz="US/Pacific"), + Timestamp("2013-01-02 14:00:00", tz="US/Eastern"), ] msg = ( "Tz-aware datetime.datetime cannot be " @@ -693,8 +693,8 @@ def test_to_datetime_utc_true( # See gh-11934 & gh-6415 data = ["20100102 121314", "20100102 121315"] expected_data = [ - pd.Timestamp("2010-01-02 12:13:14", tz="utc"), - pd.Timestamp("2010-01-02 12:13:15", tz="utc"), + Timestamp("2010-01-02 12:13:14", tz="utc"), + Timestamp("2010-01-02 12:13:15", tz="utc"), ] result = pd.to_datetime( @@ -715,7 +715,7 @@ def test_to_datetime_utc_true_with_series_single_value(self, cache): # GH 15760 UTC=True with Series ts = 1.5e18 result = pd.to_datetime(Series([ts]), utc=True, cache=cache) - expected = Series([pd.Timestamp(ts, tz="utc")]) + expected = Series([Timestamp(ts, tz="utc")]) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("cache", [True, False]) @@ -724,7 +724,7 @@ def test_to_datetime_utc_true_with_series_tzaware_string(self, cache): expected_ts = "2013-01-01 01:00:00" data = Series([ts] * 3) result = pd.to_datetime(data, utc=True, cache=cache) - expected = Series([pd.Timestamp(expected_ts, tz="utc")] * 3) + expected = Series([Timestamp(expected_ts, tz="utc")] * 3) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("cache", [True, False]) @@ -736,7 +736,7 @@ def test_to_datetime_utc_true_with_series_tzaware_string(self, cache): ], ) def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype): - expected = Series([pd.Timestamp("2013-01-01 01:00:00", tz="UTC")]) + expected = Series([Timestamp("2013-01-01 01:00:00", tz="UTC")]) result = pd.to_datetime(Series([date], dtype=dtype), utc=True, cache=cache) tm.assert_series_equal(result, expected) @@ -767,7 +767,7 @@ def test_to_datetime_tz_psycopg2(self, cache): tm.assert_index_equal(result, expected) # dtype coercion - i = pd.DatetimeIndex( + i = DatetimeIndex( ["2000-01-01 08:00:00"], tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None), ) @@ -778,9 +778,7 @@ def test_to_datetime_tz_psycopg2(self, cache): tm.assert_index_equal(result, i) result = pd.to_datetime(i, errors="coerce", utc=True, cache=cache) - expected = pd.DatetimeIndex( - ["2000-01-01 13:00:00"], dtype="datetime64[ns, UTC]" - ) + expected = DatetimeIndex(["2000-01-01 13:00:00"], dtype="datetime64[ns, UTC]") tm.assert_index_equal(result, expected) @pytest.mark.parametrize("cache", [True, False]) @@ -881,7 +879,7 @@ def test_datetime_invalid_index(self, 
values, format, infer): res = pd.to_datetime( values, errors="coerce", format=format, infer_datetime_format=infer ) - tm.assert_index_equal(res, pd.DatetimeIndex([pd.NaT] * len(values))) + tm.assert_index_equal(res, DatetimeIndex([pd.NaT] * len(values))) msg = ( "is a bad directive in format|" @@ -909,9 +907,9 @@ def test_to_datetime_cache(self, utc, format, constructor): @pytest.mark.parametrize( "listlike", [ - (deque([pd.Timestamp("2010-06-02 09:30:00")] * 51)), - ([pd.Timestamp("2010-06-02 09:30:00")] * 51), - (tuple([pd.Timestamp("2010-06-02 09:30:00")] * 51)), + (deque([Timestamp("2010-06-02 09:30:00")] * 51)), + ([Timestamp("2010-06-02 09:30:00")] * 51), + (tuple([Timestamp("2010-06-02 09:30:00")] * 51)), ], ) def test_no_slicing_errors_in_should_cache(self, listlike): @@ -920,8 +918,8 @@ def test_no_slicing_errors_in_should_cache(self, listlike): def test_to_datetime_from_deque(self): # GH 29403 - result = pd.to_datetime(deque([pd.Timestamp("2010-06-02 09:30:00")] * 51)) - expected = pd.to_datetime([pd.Timestamp("2010-06-02 09:30:00")] * 51) + result = pd.to_datetime(deque([Timestamp("2010-06-02 09:30:00")] * 51)) + expected = pd.to_datetime([Timestamp("2010-06-02 09:30:00")] * 51) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("utc", [True, None]) @@ -937,7 +935,7 @@ def test_to_datetime_cache_series(self, utc, format): def test_to_datetime_cache_scalar(self): date = "20130101 00:00:00" result = pd.to_datetime(date, cache=True) - expected = pd.Timestamp("20130101 00:00:00") + expected = Timestamp("20130101 00:00:00") assert result == expected @pytest.mark.parametrize( @@ -1052,7 +1050,7 @@ def test_mixed_offsets_with_native_datetime_raises(self): s = Series( [ "nan", - pd.Timestamp("1990-01-01"), + Timestamp("1990-01-01"), "2015-03-14T16:15:14.123-08:00", "2019-03-04T21:56:32.620-07:00", None, @@ -1219,7 +1217,7 @@ def test_unit_mixed(self, cache): # mixed integers/datetimes expected = DatetimeIndex(["2013-01-01", "NaT", "NaT"]) - arr = [pd.Timestamp("20130101"), 1.434692e18, 1.432766e18] + arr = [Timestamp("20130101"), 1.434692e18, 1.432766e18] result = pd.to_datetime(arr, errors="coerce", cache=cache) tm.assert_index_equal(result, expected) @@ -1228,7 +1226,7 @@ def test_unit_mixed(self, cache): pd.to_datetime(arr, errors="raise", cache=cache) expected = DatetimeIndex(["NaT", "NaT", "2013-01-01"]) - arr = [1.434692e18, 1.432766e18, pd.Timestamp("20130101")] + arr = [1.434692e18, 1.432766e18, Timestamp("20130101")] result = pd.to_datetime(arr, errors="coerce", cache=cache) tm.assert_index_equal(result, expected) @@ -1240,7 +1238,7 @@ def test_unit_rounding(self, cache): # GH 14156 & GH 20445: argument will incur floating point errors # but no premature rounding result = pd.to_datetime(1434743731.8770001, unit="s", cache=cache) - expected = pd.Timestamp("2015-06-19 19:55:31.877000192") + expected = Timestamp("2015-06-19 19:55:31.877000192") assert result == expected @pytest.mark.parametrize("cache", [True, False]) @@ -1899,7 +1897,7 @@ def test_infer_datetime_format_tz_name(self, tz_name, offset): s = Series([f"2019-02-02 08:07:13 {tz_name}"]) result = to_datetime(s, infer_datetime_format=True) expected = Series( - [pd.Timestamp("2019-02-02 08:07:13").tz_localize(pytz.FixedOffset(offset))] + [Timestamp("2019-02-02 08:07:13").tz_localize(pytz.FixedOffset(offset))] ) tm.assert_series_equal(result, expected) @@ -1909,9 +1907,9 @@ def test_to_datetime_iso8601_noleading_0s(self, cache): s = Series(["2014-1-1", "2014-2-2", "2015-3-3"]) expected = Series( [ - 
pd.Timestamp("2014-01-01"), - pd.Timestamp("2014-02-02"), - pd.Timestamp("2015-03-03"), + Timestamp("2014-01-01"), + Timestamp("2014-02-02"), + Timestamp("2015-03-03"), ] ) tm.assert_series_equal(pd.to_datetime(s, cache=cache), expected) @@ -2046,7 +2044,7 @@ def test_parsers(self, date_str, expected, cache): for res in [result1, result2]: assert res == expected for res in [result3, result4, result6, result8, result9]: - exp = DatetimeIndex([pd.Timestamp(expected)]) + exp = DatetimeIndex([Timestamp(expected)]) tm.assert_index_equal(res, exp) # these really need to have yearfirst, but we don't support @@ -2242,7 +2240,7 @@ def units_from_epochs(): def epochs(epoch_1960, request): """Timestamp at 1960-01-01 in various forms. - * pd.Timestamp + * Timestamp * datetime.datetime * numpy.datetime64 * str @@ -2270,7 +2268,7 @@ def test_to_basic(self, julian_dates): result = Series(pd.to_datetime(julian_dates, unit="D", origin="julian")) expected = Series( - pd.to_datetime(julian_dates - pd.Timestamp(0).to_julian_date(), unit="D") + pd.to_datetime(julian_dates - Timestamp(0).to_julian_date(), unit="D") ) tm.assert_series_equal(result, expected)