Skip to content

CLN: assorted cleanups, annotations, de-privatizing #33497

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Apr 17, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions pandas/_libs/tslibs/offsets.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -157,8 +157,8 @@ cdef _wrap_timedelta_result(result):
"""
if PyDelta_Check(result):
# convert Timedelta back to a Tick
from pandas.tseries.offsets import _delta_to_tick
return _delta_to_tick(result)
from pandas.tseries.offsets import delta_to_tick
return delta_to_tick(result)

return result

Expand Down
6 changes: 3 additions & 3 deletions pandas/core/arrays/period.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@
import pandas.core.common as com

from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick, _delta_to_tick
from pandas.tseries.offsets import DateOffset, Tick, delta_to_tick


def _field_accessor(name: str, alias: int, docstring=None):
Expand Down Expand Up @@ -487,7 +487,7 @@ def _time_shift(self, periods, freq=None):
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)

def asfreq(self, freq=None, how="E") -> "PeriodArray":
def asfreq(self, freq=None, how: str = "E") -> "PeriodArray":
"""
Convert the Period Array/Index to the specified frequency `freq`.

Expand Down Expand Up @@ -759,7 +759,7 @@ def raise_on_incompatible(left, right):
elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, DateOffset)):
other_freq = right.freqstr
else:
other_freq = _delta_to_tick(Timedelta(right)).freqstr
other_freq = delta_to_tick(Timedelta(right)).freqstr

msg = DIFFERENT_FREQ.format(
cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq
Expand Down
3 changes: 1 addition & 2 deletions pandas/core/indexes/accessors.py
Original file line number Diff line number Diff line change
Expand Up @@ -429,7 +429,6 @@ def __new__(cls, data: "Series"):
# we need to choose which parent (datetime or timedelta) is
# appropriate. Since we're checking the dtypes anyway, we'll just
# do all the validation here.
from pandas import Series

if not isinstance(data, ABCSeries):
raise TypeError(
Expand All @@ -438,7 +437,7 @@ def __new__(cls, data: "Series"):

orig = data if is_categorical_dtype(data) else None
if orig is not None:
data = Series(
data = data._constructor(
orig.array,
name=orig.name,
copy=False,
Expand Down
3 changes: 1 addition & 2 deletions pandas/core/indexes/period.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,8 @@

from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._libs.tslibs import frequencies as libfrequencies, resolution
from pandas._libs.tslibs import Period, frequencies as libfrequencies, resolution
from pandas._libs.tslibs.parsing import parse_time_string
from pandas._libs.tslibs.period import Period
from pandas._typing import DtypeObj, Label
from pandas.util._decorators import Appender, cache_readonly, doc

Expand Down
11 changes: 1 addition & 10 deletions pandas/core/indexes/timedeltas.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,16 +29,7 @@


@inherit_names(
[
"_box_values",
"__neg__",
"__pos__",
"__abs__",
"total_seconds",
"round",
"floor",
"ceil",
]
["__neg__", "__pos__", "__abs__", "total_seconds", "round", "floor", "ceil"]
+ TimedeltaArray._field_ops,
TimedeltaArray,
wrap=True,
Expand Down
9 changes: 5 additions & 4 deletions pandas/core/internals/blocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -218,7 +218,7 @@ def get_block_values_for_json(self) -> np.ndarray:
"""
This is used in the JSON C code.
"""
# TODO(2DEA): reshape will be unnecessary with 2D EAs
# TODO(EA2D): reshape will be unnecessary with 2D EAs
return np.asarray(self.values).reshape(self.shape)

@property
Expand Down Expand Up @@ -353,6 +353,7 @@ def apply(self, func, **kwargs) -> List["Block"]:
def _split_op_result(self, result) -> List["Block"]:
# See also: split_and_operate
if is_extension_array_dtype(result) and result.ndim > 1:
# TODO(EA2D): unnecessary with 2D EAs
# if we get a 2D ExtensionArray, we need to split it into 1D pieces
nbs = []
for i, loc in enumerate(self.mgr_locs):
Expand Down Expand Up @@ -1560,7 +1561,7 @@ def __init__(self, values, placement, ndim=None):
super().__init__(values, placement, ndim=ndim)

if self.ndim == 2 and len(self.mgr_locs) != 1:
# TODO(2DEA): check unnecessary with 2D EAs
# TODO(EA2D): check unnecessary with 2D EAs
raise AssertionError("block.size != values.size")

@property
Expand Down Expand Up @@ -2307,7 +2308,7 @@ def equals(self, other) -> bool:
def quantile(self, qs, interpolation="linear", axis=0):
naive = self.values.view("M8[ns]")

# kludge for 2D block with 1D values
# TODO(EA2D): kludge for 2D block with 1D values
naive = naive.reshape(self.shape)

blk = self.make_block(naive)
Expand Down Expand Up @@ -2432,7 +2433,7 @@ def f(mask, val, idx):
copy=copy,
)
if isinstance(values, np.ndarray):
# TODO: allow EA once reshape is supported
# TODO(EA2D): allow EA once reshape is supported
values = values.reshape(shape)

return values
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/internals/concat.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

import numpy as np

from pandas._libs import internals as libinternals, tslibs
from pandas._libs import NaT, internals as libinternals
from pandas.util._decorators import cache_readonly

from pandas.core.dtypes.cast import maybe_promote
Expand Down Expand Up @@ -395,7 +395,7 @@ def _get_empty_dtype_and_na(join_units):
# GH-25014. We use NaT instead of iNaT, since this eventually
# ends up in DatetimeArray.take, which does not allow iNaT.
dtype = upcast_classes["datetimetz"]
return dtype[0], tslibs.NaT
return dtype[0], NaT
elif "datetime" in upcast_classes:
return np.dtype("M8[ns]"), np.datetime64("NaT", "ns")
elif "timedelta" in upcast_classes:
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/resample.py
Original file line number Diff line number Diff line change
Expand Up @@ -323,7 +323,7 @@ def _gotitem(self, key, ndim: int, subset=None):
Parameters
----------
key : string / list of selections
ndim : 1,2
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
Expand Down
8 changes: 6 additions & 2 deletions pandas/tests/arithmetic/test_timedelta64.py
Original file line number Diff line number Diff line change
Expand Up @@ -313,7 +313,9 @@ def test_subtraction_ops(self):
tm.assert_index_equal(result, expected, check_names=False)

result = dti - td
expected = DatetimeIndex(["20121231", "20130101", "20130102"], name="bar")
expected = DatetimeIndex(
["20121231", "20130101", "20130102"], freq="D", name="bar"
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this a bug fix related to the freq? If so, is it worth a whatsnew entry?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No. At the moment, assert_index_equal doesn't check for matching freq. We are moving towards adding that check, but currently it breaks about 500 tests. Honestly, I'm not sure why this edit made it into this branch.

)
tm.assert_index_equal(result, expected, check_names=False)

result = dt - tdi
Expand Down Expand Up @@ -401,7 +403,9 @@ def _check(result, expected):
_check(result, expected)

result = dti_tz - td
expected = DatetimeIndex(["20121231", "20130101", "20130102"], tz="US/Eastern")
expected = DatetimeIndex(
["20121231", "20130101", "20130102"], tz="US/Eastern", freq="D"
)
tm.assert_index_equal(result, expected)

def test_dti_tdi_numeric_ops(self):
Expand Down
6 changes: 1 addition & 5 deletions pandas/tests/series/methods/test_replace.py
Original file line number Diff line number Diff line change
Expand Up @@ -186,11 +186,7 @@ def check_replace(to_rep, val, expected):
check_replace(tr, v, e)

# test an object with dates + floats + integers + strings
dr = (
pd.date_range("1/1/2001", "1/10/2001", freq="D")
.to_series()
.reset_index(drop=True)
)
dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
Expand Down
4 changes: 2 additions & 2 deletions pandas/tests/tseries/offsets/test_ticks.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,11 +33,11 @@ def test_apply_ticks():
def test_delta_to_tick():
delta = timedelta(3)

tick = offsets._delta_to_tick(delta)
tick = offsets.delta_to_tick(delta)
assert tick == offsets.Day(3)

td = Timedelta(nanoseconds=5)
tick = offsets._delta_to_tick(td)
tick = offsets.delta_to_tick(td)
assert tick == Nano(5)


Expand Down
4 changes: 2 additions & 2 deletions pandas/tseries/offsets.py
Original file line number Diff line number Diff line change
Expand Up @@ -2548,7 +2548,7 @@ def __add__(self, other):
if type(self) == type(other):
return type(self)(self.n + other.n)
else:
return _delta_to_tick(self.delta + other.delta)
return delta_to_tick(self.delta + other.delta)
elif isinstance(other, Period):
return other + self
try:
Expand Down Expand Up @@ -2635,7 +2635,7 @@ def is_anchored(self) -> bool:
return False


def _delta_to_tick(delta: timedelta) -> Tick:
def delta_to_tick(delta: timedelta) -> Tick:
if delta.microseconds == 0 and getattr(delta, "nanoseconds", 0) == 0:
# nanoseconds only for pd.Timedelta
if delta.seconds == 0:
Expand Down