Skip to content

TYP: pandas/core/missing.py #38339

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 31 commits into from
Closed
Show file tree
Hide file tree
Changes from 20 commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
830fa00
add type hints
arw2019 Dec 5, 2020
605dc3c
review: remove assert
arw2019 Dec 7, 2020
e77c940
merge master
arw2019 Dec 7, 2020
f2d5ec4
typo
arw2019 Dec 7, 2020
e83904f
add isna check
arw2019 Dec 7, 2020
71caeeb
better error msg when interp method not string
arw2019 Dec 7, 2020
8fbbd47
improve docstring
arw2019 Dec 7, 2020
4474ada
Merge branch 'master' of https://github.com/pandas-dev/pandas into ty…
arw2019 Dec 7, 2020
575c227
remove Optional
arw2019 Dec 7, 2020
b19896b
use Axis TypeVar
arw2019 Dec 7, 2020
5036ee1
more hints
arw2019 Dec 8, 2020
c0c4338
Merge branch 'master' of https://github.com/pandas-dev/pandas into ty…
arw2019 Dec 8, 2020
2a31823
review comments
arw2019 Dec 9, 2020
4fb893b
Merge branch 'master' of https://github.com/pandas-dev/pandas into ty…
arw2019 Dec 9, 2020
95a734b
Merge branch 'master' of https://github.com/pandas-dev/pandas into ty…
arw2019 Dec 10, 2020
4aeec70
review comment
arw2019 Dec 10, 2020
d67977d
review comment: values_to_mask
arw2019 Dec 10, 2020
24f418a
review comments: mask_missing/infer_dtype_from_array
arw2019 Dec 11, 2020
aeb0b82
typo
arw2019 Dec 11, 2020
25d0051
typo
arw2019 Dec 11, 2020
bbd25ed
review comment
arw2019 Dec 11, 2020
785d27c
Merge branch 'master' of https://github.com/pandas-dev/pandas into ty…
arw2019 Dec 11, 2020
c2d6467
Merge branch 'master' of https://github.com/pandas-dev/pandas into ty…
arw2019 Dec 14, 2020
b505de5
review comment
arw2019 Dec 14, 2020
e39c152
docstring fix
arw2019 Dec 14, 2020
cb82c9a
review comments
arw2019 Dec 14, 2020
65effed
Merge branch 'master' of https://github.com/pandas-dev/pandas into ty…
arw2019 Dec 15, 2020
2fa64bd
Merge branch 'master' of https://github.com/pandas-dev/pandas into ty…
arw2019 Jan 5, 2021
a54a02f
merge master
arw2019 Feb 21, 2021
315822c
TYP: infer_dtype_from_array
arw2019 Feb 21, 2021
df4b70a
minimize diff
arw2019 Feb 21, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 11 additions & 4 deletions pandas/core/dtypes/cast.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,14 @@
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar
from pandas._typing import (
AnyArrayLike,
ArrayLike,
Dtype,
DtypeObj,
PandasScalar,
Scalar,
)
from pandas.util._validators import validate_bool_kwarg

from pandas.core.dtypes.common import (
Expand Down Expand Up @@ -834,8 +841,8 @@ def dict_compat(d: Dict[Scalar, Scalar]) -> Dict[Scalar, Scalar]:


def infer_dtype_from_array(
arr, pandas_dtype: bool = False
) -> Tuple[DtypeObj, ArrayLike]:
arr: "Union[ArrayLike, Series, PandasScalar]", pandas_dtype: bool = False
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think for these just use AnyArrayLike

) -> "Tuple[DtypeObj, Union[ArrayLike, Series]]":
"""
Infer the dtype from an array.

Expand Down Expand Up @@ -883,7 +890,7 @@ def infer_dtype_from_array(
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
return (np.dtype(np.object_), arr)
return np.dtype(np.object_), arr

arr = np.asarray(arr)
return arr.dtype, arr
Expand Down
155 changes: 107 additions & 48 deletions pandas/core/missing.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,12 @@
Routines for filling missing data.
"""
from functools import partial
from typing import TYPE_CHECKING, Any, List, Optional, Set, Union
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Set, Tuple, Union

import numpy as np

from pandas._libs import algos, lib
from pandas._typing import ArrayLike, Axis, DtypeObj
from pandas._typing import ArrayLike, DtypeObj, IndexLabel, Scalar
from pandas.compat._optional import import_optional_dependency

from pandas.core.dtypes.cast import infer_dtype_from_array
Expand All @@ -20,10 +20,12 @@
from pandas.core.dtypes.missing import isna

if TYPE_CHECKING:
from pandas import Index
from pandas import Index, Series


def mask_missing(arr: ArrayLike, values_to_mask) -> np.ndarray:
def mask_missing(
arr: ArrayLike, values_to_mask: "Union[ArrayLike, Scalar, Series]"
) -> np.ndarray:
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
Expand Down Expand Up @@ -61,7 +63,9 @@ def mask_missing(arr: ArrayLike, values_to_mask) -> np.ndarray:
return mask


def clean_fill_method(method, allow_nearest: bool = False):
def clean_fill_method(
method: Optional[str], allow_nearest: bool = False
) -> Optional[str]:
# asfreq is compat for resampling
if method in [None, "asfreq"]:
return None
Expand Down Expand Up @@ -120,7 +124,7 @@ def clean_interp_method(method: str, **kwargs) -> str:
return method


def find_valid_index(values, how: str):
def find_valid_index(values: ArrayLike, how: str) -> Optional[int]:
"""
Retrieves the index of the first valid value.

Expand Down Expand Up @@ -160,15 +164,15 @@ def find_valid_index(values, how: str):
def interpolate_1d(
xvalues: "Index",
yvalues: np.ndarray,
method: Optional[str] = "linear",
method: str = "linear",
limit: Optional[int] = None,
limit_direction: str = "forward",
limit_area: Optional[str] = None,
fill_value: Optional[Any] = None,
bounds_error: bool = False,
order: Optional[int] = None,
**kwargs,
):
) -> np.ndarray:
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
Expand Down Expand Up @@ -218,8 +222,13 @@ def interpolate_1d(

# These are sets of index pointers to invalid values... i.e. {0, 1, etc...
all_nans = set(np.flatnonzero(invalid))
start_nans = set(range(find_valid_index(yvalues, "first")))
end_nans = set(range(1 + find_valid_index(yvalues, "last"), len(valid)))

start_nan_idx = find_valid_index(yvalues, "first")
start_nans = set() if start_nan_idx is None else set(range(start_nan_idx))

end_nan_idx = find_valid_index(yvalues, "last")
end_nans = set() if end_nan_idx is None else set(range(1 + end_nan_idx, len(valid)))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

is this fixing a bug in the case where end_nan_idx is None?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think so because we special case all nans/no nans at the top of the method


mid_nans = all_nans - start_nans - end_nans

# Like the sets above, preserve_nans contains indices of invalid values,
Expand Down Expand Up @@ -292,8 +301,15 @@ def interpolate_1d(


def _interpolate_scipy_wrapper(
x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs
):
x: np.ndarray,
y: np.ndarray,
new_x: Union[Scalar, np.ndarray],
method: str,
fill_value: Optional[Scalar] = None,
bounds_error: bool = False,
order: Optional[int] = None,
**kwargs,
) -> np.ndarray:
"""
Passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
Expand Down Expand Up @@ -333,15 +349,14 @@ def _interpolate_scipy_wrapper(
"polynomial",
]
if method in interp1d_methods:
if method == "polynomial":
method = order
kind = order if method == "polynomial" else method
terp = interpolate.interp1d(
x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error
x, y, kind=kind, fill_value=fill_value, bounds_error=bounds_error
)
new_y = terp(new_x)
elif method == "spline":
# GH #10633, #24014
if isna(order) or (order <= 0):
if order is None or isna(order) or order <= 0:
raise ValueError(
f"order needs to be specified and greater than 0; got order: {order}"
)
Expand All @@ -356,12 +371,23 @@ def _interpolate_scipy_wrapper(
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x, **kwargs)

if isinstance(method, str):
alt_method = alt_methods[method]
new_y = alt_method(x, y, new_x, **kwargs)
else:
raise ValueError(f"{method} is not a valid interp method")
return new_y


def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
def _from_derivatives(
xi: np.ndarray,
yi: np.ndarray,
x: Union[Scalar, ArrayLike],
order: Optional[Union[int, List[int]]] = None,
der: Union[int, List[int]] = 0,
extrapolate: bool = False,
) -> np.ndarray:
"""
Convenience function for interpolate.BPoly.from_derivatives.

Expand All @@ -374,15 +400,16 @@ def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order: None or int or array_like of ints. Default: None.
x : scalar or array_like
order: None or int or array_like of ints, default: None
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
der : int or list, default: 0
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
extrapolate : bool, default False
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

the alignment here is off

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fixed

Whether to extrapolate to out-of-bounds points based on first and last
intervals, or to return NaNs.

Expand All @@ -404,7 +431,13 @@ def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
return m(x)


def _akima_interpolate(xi, yi, x, der=0, axis=0):
def _akima_interpolate(
xi: np.ndarray,
yi: np.ndarray,
x: Union[Scalar, ArrayLike],
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

i think these are just np.ndarray right?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

and similar in almost all places that you are using Union[Scalar, ArrayLike] unless you can reveal_type that this is not the case

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You're right, they're always ndarray except in _interpolate_scipy_wrapper. It's because in _interpolate_scipy_wrapper we cast any scalar input to ndarray and these functions always get called from there:

new_x = np.asarray(new_x)

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fixed these, and docstrings too

der: int = 0,
axis: int = 0,
) -> Union[Scalar, ArrayLike]:
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
Expand All @@ -414,9 +447,9 @@ def _akima_interpolate(xi, yi, x, der=0, axis=0):

Parameters
----------
xi : array_like
xi : np.ndarray
A sorted list of x-coordinates, of length N.
yi : array_like
yi : np.ndarray
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
Expand Down Expand Up @@ -447,7 +480,14 @@ def _akima_interpolate(xi, yi, x, der=0, axis=0):
return P(x, nu=der)


def _cubicspline_interpolate(xi, yi, x, axis=0, bc_type="not-a-knot", extrapolate=None):
def _cubicspline_interpolate(
xi: np.ndarray,
yi: np.ndarray,
x: Union[ArrayLike, Scalar],
axis: int = 0,
bc_type: Union[str, Tuple] = "not-a-knot",
extrapolate: Optional[Union[bool, str]] = None,
) -> Union[ArrayLike, Scalar]:
"""
Convenience function for cubic spline data interpolator.

Expand Down Expand Up @@ -555,6 +595,8 @@ def _interpolate_with_limit_area(
first = find_valid_index(values, "first")
last = find_valid_index(values, "last")

assert first is not None and last is not None

values = interpolate_2d(
values,
method=method,
Expand All @@ -572,12 +614,12 @@ def _interpolate_with_limit_area(


def interpolate_2d(
values,
values: np.ndarray,
method: str = "pad",
axis: Axis = 0,
axis: int = 0,
limit: Optional[int] = None,
limit_area: Optional[str] = None,
):
) -> np.ndarray:
"""
Perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result.
Expand Down Expand Up @@ -623,7 +665,10 @@ def interpolate_2d(
raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0")
values = values.reshape(tuple((1,) + values.shape))

method = clean_fill_method(method)
method_cleaned = clean_fill_method(method)
assert isinstance(method_cleaned, str)
method = method_cleaned

tvalues = transf(values)
if method == "pad":
result = _pad_2d(tvalues, limit=limit)
Expand All @@ -642,7 +687,9 @@ def interpolate_2d(
return result


def _cast_values_for_fillna(values, dtype: DtypeObj, has_mask: bool):
def _cast_values_for_fillna(
values: ArrayLike, dtype: DtypeObj, has_mask: bool
) -> ArrayLike:
"""
Cast values to a dtype that algos.pad and algos.backfill can handle.
"""
Expand All @@ -661,34 +708,41 @@ def _cast_values_for_fillna(values, dtype: DtypeObj, has_mask: bool):
return values


def _fillna_prep(values, mask=None):
def _fillna_prep(
values: np.ndarray, mask: Optional[np.ndarray] = None
) -> Tuple[np.ndarray, np.ndarray]:
# boilerplate for _pad_1d, _backfill_1d, _pad_2d, _backfill_2d
dtype = values.dtype

has_mask = mask is not None
if not has_mask:
# This needs to occur before datetime/timedeltas are cast to int64
mask = isna(values)

values = _cast_values_for_fillna(values, dtype, has_mask)
# This needs to occur before datetime/timedeltas are cast to int64
mask = isna(values) if mask is None else mask

values = _cast_values_for_fillna(values, values.dtype, has_mask)
mask = mask.view(np.uint8)

return values, mask


def _pad_1d(values, limit=None, mask=None):
def _pad_1d(
    values: np.ndarray, limit: Optional[int] = None, mask: Optional[np.ndarray] = None
) -> np.ndarray:
    """
    Pad (forward-fill) missing values in a 1-D array.

    Parameters
    ----------
    values : np.ndarray
        1-D array whose missing entries are filled; ``algos.pad_inplace``
        operates on it in place after ``_fillna_prep`` preprocessing.
    limit : int, optional
        Maximum number of consecutive entries to fill; passed through to
        ``algos.pad_inplace``.
    mask : np.ndarray, optional
        Precomputed boolean mask of missing entries; if None,
        ``_fillna_prep`` computes one from ``values``.

    Returns
    -------
    np.ndarray
        The (possibly cast) values array returned by ``_fillna_prep``,
        after filling.
    """
    # _fillna_prep casts values to a dtype the filling algos can handle
    # and converts the mask to uint8.
    values, mask = _fillna_prep(values, mask)
    algos.pad_inplace(values, mask, limit=limit)
    return values


def _backfill_1d(values, limit=None, mask=None):
def _backfill_1d(
    values: np.ndarray, limit: Optional[int] = None, mask: Optional[np.ndarray] = None
) -> np.ndarray:
    """
    Backfill missing values in a 1-D array.

    Parameters
    ----------
    values : np.ndarray
        1-D array whose missing entries are filled; ``algos.backfill_inplace``
        operates on it in place after ``_fillna_prep`` preprocessing.
    limit : int, optional
        Maximum number of consecutive entries to fill; passed through to
        ``algos.backfill_inplace``.
    mask : np.ndarray, optional
        Precomputed boolean mask of missing entries; if None,
        ``_fillna_prep`` computes one from ``values``.

    Returns
    -------
    np.ndarray
        The (possibly cast) values array returned by ``_fillna_prep``,
        after filling.
    """
    # _fillna_prep casts values to a dtype the filling algos can handle
    # and converts the mask to uint8.
    values, mask = _fillna_prep(values, mask)
    algos.backfill_inplace(values, mask, limit=limit)
    return values


def _pad_2d(values, limit=None, mask=None):
def _pad_2d(
values: np.ndarray, limit: Optional[int] = None, mask: Optional[np.ndarray] = None
) -> np.ndarray:
values, mask = _fillna_prep(values, mask)

if np.all(values.shape):
Expand All @@ -699,7 +753,9 @@ def _pad_2d(values, limit=None, mask=None):
return values


def _backfill_2d(values, limit=None, mask=None):
def _backfill_2d(
values: np.ndarray, limit: Optional[int] = None, mask: Optional[np.ndarray] = None
) -> np.ndarray:
values, mask = _fillna_prep(values, mask)

if np.all(values.shape):
Expand All @@ -713,16 +769,19 @@ def _backfill_2d(values, limit=None, mask=None):
_fill_methods = {"pad": _pad_1d, "backfill": _backfill_1d}


def get_fill_func(method):
method = clean_fill_method(method)
return _fill_methods[method]
def get_fill_func(method: str) -> Callable:
    """
    Return the 1-D fill function for the given fill method.

    Parameters
    ----------
    method : str
        Fill method name; normalized by ``clean_fill_method`` before the
        lookup in ``_fill_methods`` ("pad" -> ``_pad_1d``,
        "backfill" -> ``_backfill_1d``).

    Returns
    -------
    Callable
        The fill function registered for the cleaned method name.

    Raises
    ------
    KeyError
        If the cleaned method is not in ``_fill_methods``.
    """
    method_cleaned = clean_fill_method(method)
    # clean_fill_method is typed Optional[str] (it maps None/"asfreq" to
    # None); the assert narrows the type for the dict lookup below.
    assert isinstance(method_cleaned, str)
    return _fill_methods[method_cleaned]


def clean_reindex_fill_method(method):
def clean_reindex_fill_method(method: str) -> Optional[str]:
    """
    Validate/normalize a fill method for reindexing.

    Delegates to ``clean_fill_method`` with ``allow_nearest=True``, so
    "nearest" is accepted in addition to the standard pad/backfill methods.
    """
    return clean_fill_method(method, allow_nearest=True)


def _interp_limit(invalid, fw_limit, bw_limit):
def _interp_limit(
invalid: np.ndarray, fw_limit: Optional[int], bw_limit: Optional[int]
) -> Set[IndexLabel]:
"""
Get indexers of values that won't be filled
because they exceed the limits.
Expand Down Expand Up @@ -757,7 +816,7 @@ def _interp_limit(invalid, fw_limit, bw_limit):
f_idx = set()
b_idx = set()

def inner(invalid, limit):
def inner(invalid: np.ndarray, limit: int) -> Set[IndexLabel]:
limit = min(limit, N)
windowed = _rolling_window(invalid, limit + 1).all(1)
idx = set(np.where(windowed)[0] + limit) | set(
Expand Down Expand Up @@ -787,7 +846,7 @@ def inner(invalid, limit):
return f_idx & b_idx


def _rolling_window(a: np.ndarray, window: int):
def _rolling_window(a: np.ndarray, window: int) -> np.ndarray:
"""
[True, True, False, True, False], 2 ->

Expand Down