CLN: TODOs and FIXMEs #44479

Merged 2 commits on Nov 17, 2021

2 changes: 0 additions & 2 deletions pandas/core/arrays/datetimelike.py
@@ -1466,8 +1466,6 @@ def max(self, *, axis: int | None = None, skipna: bool = True, **kwargs):
Index.max : Return the maximum value in an Index.
Series.max : Return the maximum value in a Series.
"""
# TODO: skipna is broken with max.
# See https://github.com/pandas-dev/pandas/issues/24265
nv.validate_max((), kwargs)
nv.validate_minmax_axis(axis, self.ndim)

2 changes: 1 addition & 1 deletion pandas/core/arrays/masked.py
@@ -605,7 +605,7 @@ def value_counts(self, dropna: bool = True) -> Series:
data = self._data[~self._mask]
value_counts = Index(data).value_counts()

# TODO(extension)
# TODO(ExtensionIndex)
# if we have allow Index to hold an ExtensionArray
# this is easier
index = value_counts.index._values.astype(object)
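
A side note on the code around the reworded TODO(ExtensionIndex) comment: value_counts on a masked (nullable) array drops the masked slots before counting when dropna=True, which is what the `self._data[~self._mask]` line above does. A minimal usage sketch, assuming a pandas version with nullable integer arrays:

```python
import pandas as pd

# The pd.NA slot is masked out before counting when dropna=True.
arr = pd.array([1, 1, pd.NA, 2], dtype="Int64")
print(arr.value_counts(dropna=True))
# 1    2
# 2    1
# dtype: Int64
```
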
2 changes: 1 addition & 1 deletion pandas/core/groupby/generic.py
@@ -1039,7 +1039,7 @@ def _wrap_applied_output_series(
key_index,
) -> DataFrame | Series:
# this is to silence a DeprecationWarning
# TODO: Remove when default dtype of empty Series is object
# TODO(2.0): Remove when default dtype of empty Series is object
kwargs = first_not_none._construct_axes_dict()
backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)
values = [x if (x is not None) else backup for x in values]
3 changes: 2 additions & 1 deletion pandas/core/groupby/groupby.py
@@ -3337,7 +3337,8 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, axis=0
Series or DataFrame
Percentage changes within each group.
"""
# TODO: Remove this conditional for SeriesGroupBy when GH#23918 is fixed
# TODO(GH#23918): Remove this conditional for SeriesGroupBy when
# GH#23918 is fixed
if freq is not None or axis != 0:
return self.apply(
lambda x: x.pct_change(
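
For reference, the conditional guarded by the reworded TODO(GH#23918) comment is a fallback: when freq or a non-default axis is passed, the groupby applies Series.pct_change per group instead of taking the cythonized path. A small sketch of the two paths on made-up data:

```python
import pandas as pd

df = pd.DataFrame({"g": ["a", "a", "b", "b"], "v": [1.0, 2.0, 4.0, 6.0]})

# Fast path: groupby-level pct_change
fast = df.groupby("g")["v"].pct_change()

# Per-group apply, the same shape as the fallback taken when freq/axis is set
slow = df.groupby("g")["v"].apply(lambda x: x.pct_change())

print(fast.tolist())  # [nan, 1.0, nan, 0.5]
print(slow.tolist())  # [nan, 1.0, nan, 0.5]
```
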
4 changes: 2 additions & 2 deletions pandas/core/groupby/ops.py
@@ -868,8 +868,8 @@ def result_arraylike(self) -> ArrayLike:
Analogous to result_index, but returning an ndarray/ExtensionArray
allowing us to retain ExtensionDtypes not supported by Index.
"""
# TODO: once Index supports arbitrary EAs, this can be removed in favor
# of result_index
# TODO(ExtensionIndex): once Index supports arbitrary EAs, this can
# be removed in favor of result_index
if len(self.groupings) == 1:
return self.groupings[0].group_arraylike

2 changes: 0 additions & 2 deletions pandas/core/internals/managers.py
@@ -80,8 +80,6 @@
operate_blockwise,
)

# TODO: flexible with index=None and/or items=None

T = TypeVar("T", bound="BaseBlockManager")


26 changes: 13 additions & 13 deletions pandas/core/nanops.py
@@ -1781,16 +1781,20 @@ def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:
# We need to define mask before masking NaTs
mask = isna(values)

if accum_func == np.minimum.accumulate:
# Note: the accum_func comparison fails as an "is" comparison
y = values.view("i8")
y[mask] = lib.i8max
changed = True
else:
y = values
changed = False
y = values.view("i8")
# Note: the accum_func comparison fails as an "is" comparison
changed = accum_func == np.minimum.accumulate

try:
if changed:
y[mask] = lib.i8max

result = accum_func(y, axis=0)
finally:
if changed:
# restore NaT elements
y[mask] = iNaT

result = accum_func(y.view("i8"), axis=0)
if skipna:
result[mask] = iNaT
elif accum_func == np.minimum.accumulate:
@@ -1800,10 +1804,6 @@ def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:
# everything up to the first non-na entry stays NaT
result[: nz[0]] = iNaT

if changed:
# restore NaT elements
y[mask] = iNaT # TODO: could try/finally for this?

if isinstance(values.dtype, np.dtype):
result = result.view(orig_dtype)
else:
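
For readers skimming the nanops.py hunk: the rewrite collapses the old branch-and-flag bookkeeping into a single `changed` boolean and wraps the sentinel swap in try/finally, so the NaT slots are restored even if the accumulation raises. A standalone sketch of the same pattern on a plain int64 view (the constants stand in for `lib.i8max` and `iNaT`; this is an illustration, not the pandas implementation):

```python
import numpy as np

I8_MAX = np.iinfo(np.int64).max  # stand-in for pandas' lib.i8max
INAT = np.iinfo(np.int64).min    # stand-in for iNaT, the NaT sentinel


def masked_cummin_i8(values_i8: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """Cumulative minimum over an int64 view, treating masked slots as missing."""
    y = values_i8
    changed = True  # mirrors `changed = accum_func == np.minimum.accumulate`
    try:
        if changed:
            # make masked slots the largest value so they never win the minimum
            y[mask] = I8_MAX
        result = np.minimum.accumulate(y, axis=0)
    finally:
        if changed:
            # restore the sentinel even if the accumulation raised
            y[mask] = INAT
    # skipna semantics: positions that were missing stay missing
    result[mask] = INAT
    return result


vals = np.array([5, INAT, 3, 10], dtype="i8")
print(masked_cummin_i8(vals, vals == INAT))  # [5, INAT, 3, 3]
```
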
3 changes: 2 additions & 1 deletion pandas/io/sas/sas7bdat.py
@@ -103,13 +103,14 @@ class _Column:
col_id: int
name: str | bytes
label: str | bytes
format: str | bytes # TODO: i think allowing bytes is from py2 days
format: str | bytes
ctype: bytes
length: int

def __init__(
self,
col_id: int,
# These can be bytes when convert_header_text is False
name: str | bytes,
label: str | bytes,
format: str | bytes,
3 changes: 0 additions & 3 deletions pandas/io/sql.py
@@ -2158,9 +2158,6 @@ def to_sql(
table.insert(chunksize, method)

def has_table(self, name: str, schema: str | None = None):
# TODO(wesm): unused?
# escape = _get_valid_sqlite_name
# esc_name = escape(name)

wld = "?"
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
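
The surviving body of `has_table` is a parameterized lookup against sqlite_master; the deleted comment referenced escaping code that was never used. A rough standalone equivalent using only the standard library (the table name is invented for illustration):

```python
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE example (a INTEGER)")

# Same shape of query as above: let the driver bind the table name through
# the "?" placeholder instead of escaping it by hand.
query = "SELECT name FROM sqlite_master WHERE type='table' AND name=?;"
print(bool(con.execute(query, ("example",)).fetchall()))  # True
print(bool(con.execute(query, ("missing",)).fetchall()))  # False
```
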
11 changes: 4 additions & 7 deletions pandas/tests/apply/test_series_apply.py
@@ -794,18 +794,15 @@ def test_apply_to_timedelta():
list_of_valid_strings = ["00:00:01", "00:00:02"]
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# FIXME: dont leave commented-out
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
tm.assert_series_equal(Series(a), b)

list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]

a = pd.to_timedelta(list_of_strings) # noqa
a = pd.to_timedelta(list_of_strings)
with tm.assert_produces_warning(FutureWarning, match="Inferring timedelta64"):
ser = Series(list_of_strings)
b = ser.apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
b = ser.apply(pd.to_timedelta)
tm.assert_series_equal(Series(a), b)


@pytest.mark.parametrize(
19 changes: 9 additions & 10 deletions pandas/tests/arrays/floating/test_arithmetic.py
@@ -142,17 +142,16 @@ def test_error_invalid_values(data, all_arithmetic_operators):
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))

if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
msg = "|".join(
[
"can only perform ops with numeric values",
"cannot perform .* with this index type: DatetimeArray",
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
"with DatetimeArray is no longer supported. *",
]
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))


# Various
19 changes: 9 additions & 10 deletions pandas/tests/arrays/integer/test_arithmetic.py
@@ -179,17 +179,16 @@ def test_error_invalid_values(data, all_arithmetic_operators):
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))

if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
msg = "|".join(
[
"can only perform ops with numeric values",
"cannot perform .* with this index type: DatetimeArray",
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
"with DatetimeArray is no longer supported. *",
]
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))


# Various
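
Both arithmetic test files above replace implicit string concatenation (with trailing "|" characters) by an explicit `"|".join([...])`, which makes it clearer that the `match` argument is a regex alternation of acceptable error messages. A self-contained illustration of the pattern using generic Python errors rather than the pandas messages from the diff:

```python
import pytest

# pytest.raises(match=...) uses re.search, so joining candidate messages with
# "|" accepts whichever TypeError wording the operation happens to raise.
msg = "|".join(
    [
        "can only concatenate list",
        "unsupported operand type",
    ]
)


def test_alternation_matches_either_message():
    with pytest.raises(TypeError, match=msg):
        [1, 2] + "foo"  # "can only concatenate list ..."
    with pytest.raises(TypeError, match=msg):
        1 + "foo"  # "unsupported operand type(s) for +: ..."
```
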
10 changes: 7 additions & 3 deletions pandas/tests/arrays/test_datetimes.py
@@ -33,9 +33,13 @@ def test_cmp_dt64_arraylike_tznaive(self, comparison_op):

result = op(arr, arr)
tm.assert_numpy_array_equal(result, expected)
for other in [right, np.array(right)]:
# TODO: add list and tuple, and object-dtype once those
# are fixed in the constructor
for other in [
right,
np.array(right),
list(right),
tuple(right),
right.astype(object),
]:
result = op(arr, other)
tm.assert_numpy_array_equal(result, expected)

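
The widened loop in test_datetimes.py now also feeds the comparison a list, a tuple, and an object-dtype array of the same values, since the constructor issues the old TODO referred to have been fixed. A rough standalone analogue of what the loop asserts, using plain numpy datetime64 data instead of a DatetimeArray:

```python
import numpy as np

arr = np.array(["2021-01-01", "2021-01-02"], dtype="M8[ns]")
right = arr.copy()
expected = np.array([True, True])

# The same values wrapped in different containers should compare identically.
for other in [right, list(right), tuple(right), right.astype(object)]:
    result = arr == np.asarray(other, dtype="M8[ns]")
    assert (result == expected).all()
```
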
41 changes: 19 additions & 22 deletions pandas/tests/frame/test_arithmetic.py
@@ -925,8 +925,8 @@ def test_binop_other(self, op, value, dtype, switch_numexpr_min_elements):
(operator.mul, "bool"),
}

e = DummyElement(value, dtype)
s = DataFrame({"A": [e.value, e.value]}, dtype=e.dtype)
elem = DummyElement(value, dtype)
df = DataFrame({"A": [elem.value, elem.value]}, dtype=elem.dtype)

invalid = {
(operator.pow, "<M8[ns]"),
@@ -960,7 +960,7 @@ def test_binop_other(self, op, value, dtype, switch_numexpr_min_elements):

with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(warn):
op(s, e.value)
op(df, elem.value)

elif (op, dtype) in skip:

@@ -971,19 +971,17 @@ def test_binop_other(self, op, value, dtype, switch_numexpr_min_elements):
else:
warn = None
with tm.assert_produces_warning(warn):
op(s, e.value)
op(df, elem.value)

else:
msg = "operator '.*' not implemented for .* dtypes"
with pytest.raises(NotImplementedError, match=msg):
op(s, e.value)
op(df, elem.value)

else:
# FIXME: Since dispatching to Series, this test no longer
# asserts anything meaningful
with tm.assert_produces_warning(None):
result = op(s, e.value).dtypes
expected = op(s, value).dtypes
result = op(df, elem.value).dtypes
expected = op(df, value).dtypes
tm.assert_series_equal(result, expected)


@@ -1240,9 +1238,7 @@ def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")

def test_combine_series(
self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
):
def test_combine_series(self, float_frame, mixed_float_frame, mixed_int_frame):

# Series
series = float_frame.xs(float_frame.index[0])
@@ -1272,17 +1268,18 @@ def test_combine_series(
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype={"C": None})

# FIXME: don't leave commented-out
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int

# added = mixed_int_frame + (100*series).astype('int64')
# _check_mixed_int(added, dtype = {"A": 'int64', "B": 'float64', "C":
# 'int64', "D": 'int64'})
# added = mixed_int_frame + (100*series).astype('int32')
# _check_mixed_int(added, dtype = {"A": 'int32', "B": 'float64', "C":
# 'int32', "D": 'int64'})
# these used to raise with numexpr as we are adding an int64 to an
# uint64....weird vs int
added = mixed_int_frame + (100 * series).astype("int64")
_check_mixed_int(
added, dtype={"A": "int64", "B": "float64", "C": "int64", "D": "int64"}
)
added = mixed_int_frame + (100 * series).astype("int32")
_check_mixed_int(
added, dtype={"A": "int32", "B": "float64", "C": "int32", "D": "int64"}
)

def test_combine_timeseries(self, datetime_frame):
# TimeSeries
ts = datetime_frame["A"]

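
The re-enabled block in `test_combine_series` leans on numpy's promotion rule for mixed int64/uint64 arithmetic (the "weird" case the old comment alluded to): no integer dtype covers both ranges, so the result is promoted to float64, which is why one column in each expected dtype dict ends up float64 while the rest stay integral. A one-line check of that rule:

```python
import numpy as np

# int64 + uint64 has no common integer dtype wide enough for both,
# so numpy promotes the result to float64.
print((np.array([1], dtype="uint64") + np.array([1], dtype="int64")).dtype)  # float64
```
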
1 change: 0 additions & 1 deletion pandas/tests/indexes/datetimes/methods/test_insert.py
@@ -236,7 +236,6 @@ def test_insert_mismatched_types_raises(self, tz_aware_fixture, item):
result = dti.insert(1, item)

if isinstance(item, np.ndarray):
# FIXME: without doing .item() here this segfaults
assert item.item() == 0
expected = Index([dti[0], 0] + list(dti[1:]), dtype=object, name=9)
else:
5 changes: 1 addition & 4 deletions pandas/tests/indexes/multi/test_formats.py
@@ -87,10 +87,7 @@ def test_unicode_repr_issues(self):
index = MultiIndex(levels=levels, codes=codes)

repr(index.levels)

# FIXME: dont leave commented-out
# NumPy bug
# repr(index.get_level_values(1))
repr(index.get_level_values(1))

def test_repr_max_seq_items_equal_to_n(self, idx):
# display.max_seq_items == n
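
test_unicode_repr_issues re-enables the `repr(index.get_level_values(1))` call that had been commented out because of a long-fixed NumPy unicode-repr bug. A minimal standalone version of the same smoke test, with invented level values:

```python
import pandas as pd

# Smoke test: repr of unicode level values should simply not raise.
mi = pd.MultiIndex.from_arrays([["a", "a"], ["σ", "ω"]])
repr(mi.levels)
repr(mi.get_level_values(1))
```
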
6 changes: 3 additions & 3 deletions pandas/tests/series/methods/test_convert.py
@@ -108,15 +108,15 @@ def test_convert(self):
result = ser._convert(datetime=True)
tm.assert_series_equal(result, expected)

# preserver if non-object
# preserve if non-object
ser = Series([1], dtype="float32")
result = ser._convert(datetime=True)
tm.assert_series_equal(result, ser)

# FIXME: dont leave commented-out
# res = ser.copy()
# r[0] = np.nan
# result = res._convert(convert_dates=True,convert_numeric=False)
# res[0] = np.nan
# result = res._convert(datetime=True, numeric=False)
# assert result.dtype == 'M8[ns]'

def test_convert_no_arg_error(self):
12 changes: 0 additions & 12 deletions pandas/tests/series/test_arithmetic.py
@@ -244,13 +244,6 @@ def test_add_corner_cases(self, datetime_series):
result = empty + empty.copy()
assert len(result) == 0

# FIXME: dont leave commented-out
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas

def test_add_float_plus_int(self, datetime_series):
# float + int
int_ts = datetime_series.astype(int)[:-5]
@@ -613,11 +606,6 @@ def test_comparison_operators_with_nas(self, comparison_op):

tm.assert_series_equal(result, expected)

# FIXME: dont leave commented-out
# result = comparison_op(val, ser)
# expected = comparison_op(val, ser.dropna()).reindex(ser.index)
# tm.assert_series_equal(result, expected)

def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]