Skip to content

TYP: Index.join, get_indexer, reindex #40390

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 6 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
130 changes: 120 additions & 10 deletions pandas/core/indexes/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
TypeVar,
Union,
cast,
overload,
)
import warnings

Expand Down Expand Up @@ -165,6 +166,8 @@
)

if TYPE_CHECKING:
from typing import Literal

from pandas import (
CategoricalIndex,
DataFrame,
Expand Down Expand Up @@ -3380,7 +3383,7 @@ def get_loc(self, key, method=None, tolerance=None):
@final
def get_indexer(
self, target, method=None, limit=None, tolerance=None
) -> np.ndarray:
) -> np.ndarray[np.intp]:

method = missing.clean_reindex_fill_method(method)
target = ensure_index(target)
Expand All @@ -3405,7 +3408,7 @@ def get_indexer(

def _get_indexer(
self, target: Index, method=None, limit=None, tolerance=None
) -> np.ndarray:
) -> np.ndarray[np.intp]:
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)

Expand Down Expand Up @@ -3735,7 +3738,9 @@ def _validate_can_reindex(self, indexer: np.ndarray) -> None:
if not self._index_as_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")

def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
def reindex(
self, target, method=None, level=None, limit=None, tolerance=None
) -> Tuple[Index, Optional[np.ndarray[np.intp]]]:
"""
Create index with target's values.

Expand Down Expand Up @@ -3863,9 +3868,31 @@ def _reindex_non_unique(self, target):
# --------------------------------------------------------------------
# Join Methods

# Typing overload: when ``return_indexers=True`` the join returns the joined
# Index plus the left/right indexers (np.intp arrays, or None for a side
# that needed no reindexing).
@overload
def join(
    self,
    other: Index,
    how: str_t = "left",
    level=None,
    return_indexers: Literal[True] = ...,
    sort: bool = False,
) -> Tuple[Index, Optional[np.ndarray[np.intp]], Optional[np.ndarray[np.intp]]]:
    ...

# Typing overload: when ``return_indexers=False`` (the default) only the
# joined Index is returned.
# Fix: the rendered diff carried a stale duplicate parameter line
# (``other,`` followed by ``other: Index,``); the un-annotated duplicate is
# removed so the signature is valid and matches the sibling overload above.
@overload
def join(
    self,
    other: Index,
    how: str_t = "left",
    level=None,
    return_indexers: Literal[False] = ...,
    sort: bool = False,
) -> Index:
    ...

def join(
self,
other: Index,
how: str_t = "left",
level=None,
return_indexers: bool = False,
Expand Down Expand Up @@ -3915,15 +3942,15 @@ def join(
if len(other) == 0 and how in ("left", "outer"):
join_index = self._view()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
rindexer = -1 * np.ones(len(join_index), dtype=np.intp)
return join_index, None, rindexer
else:
return join_index

if len(self) == 0 and how in ("right", "outer"):
join_index = other._view()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
lindexer = -1 * np.ones(len(join_index), dtype=np.intp)
return join_index, lindexer, None
else:
return join_index
Expand Down Expand Up @@ -3996,16 +4023,32 @@ def join(
lindexer = None
else:
lindexer = self.get_indexer(join_index)
lindexer = ensure_platform_int(lindexer)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
rindexer = ensure_platform_int(rindexer)
return join_index, lindexer, rindexer
else:
return join_index

# Typing overloads for _join_multi: the runtime implementation returns either
# a (joined MultiIndex, left indexer, right indexer) triple or just the
# joined MultiIndex, depending on ``return_indexers``. Indexer arrays are
# np.intp, or None for a side that needed no reindexing.
@overload
def _join_multi(
    self, other: Index, how: str = "left", return_indexers: Literal[True] = ...
) -> Tuple[
    MultiIndex, Optional[np.ndarray[np.intp]], Optional[np.ndarray[np.intp]]
]:
    ...

@overload
def _join_multi(
    self, other: Index, how: str = "left", return_indexers: Literal[False] = ...
) -> MultiIndex:
    ...

@final
def _join_multi(self, other, how, return_indexers=True):
def _join_multi(self, other: Index, how: str, return_indexers: bool = True):
from pandas.core.indexes.multi import MultiIndex
from pandas.core.reshape.merge import restore_dropped_levels_multijoin

Expand Down Expand Up @@ -4083,8 +4126,22 @@ def _join_multi(self, other, how, return_indexers=True):
return result[0], result[2], result[1]
return result

# Typing overloads for _join_non_unique: with ``return_indexers=True`` the
# call yields (joined Index, left indexer, right indexer); with False it
# yields only the joined Index. Indexer arrays are np.intp or None.
@overload
def _join_non_unique(
    self, other: Index, how: str = "left", return_indexers: Literal[True] = ...
) -> Tuple[Index, Optional[np.ndarray[np.intp]], Optional[np.ndarray[np.intp]]]:
    ...

@overload
def _join_non_unique(
    self, other: Index, how: str = "left", return_indexers: Literal[False] = ...
) -> Index:
    ...

@final
def _join_non_unique(self, other, how="left", return_indexers=False):
def _join_non_unique(
self, other: Index, how: str = "left", return_indexers: bool = False
):
from pandas.core.reshape.merge import get_join_indexers

# We only get here if dtypes match
Expand All @@ -4111,9 +4168,38 @@ def _join_non_unique(self, other, how="left", return_indexers=False):
else:
return join_index

# Typing overloads for _join_level.
# Fix: both overloads were declared with the identical parameter list
# (``return_indexers: bool = False``), so a type checker could never select
# the first one — the overloads were indistinguishable. They now use
# ``Literal[True]`` / ``Literal[False]`` on ``return_indexers``, matching
# every other overload pair in this file (join, _join_multi,
# _join_non_unique, _join_monotonic). Runtime behavior is unaffected:
# @overload stubs are type-only.
@overload
def _join_level(
    self,
    other: Index,
    level,
    how: str = "left",
    return_indexers: Literal[True] = ...,
    keep_order: bool = True,
) -> Tuple[
    MultiIndex, Optional[np.ndarray[np.intp]], Optional[np.ndarray[np.intp]]
]:
    ...

@overload
def _join_level(
    self,
    other: Index,
    level,
    how: str = "left",
    return_indexers: Literal[False] = ...,
    keep_order: bool = True,
) -> MultiIndex:
    ...

@final
def _join_level(
self, other, level, how="left", return_indexers=False, keep_order=True
self,
other: Index,
level,
how: str = "left",
return_indexers: bool = False,
keep_order: bool = True,
):
"""
The join method *only* affects the level of the resulting
Expand Down Expand Up @@ -4253,8 +4339,20 @@ def _get_leaf_sorter(labels):
else:
return join_index

# Typing overloads for _join_monotonic: with ``return_indexers=True`` the
# call yields (joined Index, left indexer, right indexer); with False it
# yields only the joined Index. Indexer arrays are np.intp or None.
@overload
def _join_monotonic(
    self, other: Index, how: str = "left", return_indexers: Literal[True] = ...
) -> Tuple[Index, Optional[np.ndarray[np.intp]], Optional[np.ndarray[np.intp]]]:
    ...

@overload
def _join_monotonic(
    self, other: Index, how: str = "left", return_indexers: Literal[False] = ...
) -> Index:
    ...

@final
def _join_monotonic(self, other, how="left", return_indexers=False):
def _join_monotonic(self, other, how: str = "left", return_indexers: bool = False):
# We only get here with matching dtypes
assert other.dtype == self.dtype

Expand Down Expand Up @@ -5236,6 +5334,18 @@ def get_indexer_for(self, target, **kwargs):
indexer, _ = self.get_indexer_non_unique(target)
return indexer

# Typing overloads for _get_indexer_non_comparable: with ``unique=True`` a
# single np.intp indexer array is returned; with ``unique=False`` a
# (indexer, missing) pair of np.intp arrays is returned.
@overload
def _get_indexer_non_comparable(
    self, target: Index, method, unique: Literal[True] = ...
) -> np.ndarray[np.intp]:
    ...

@overload
def _get_indexer_non_comparable(
    self, target: Index, method, unique: Literal[False] = ...
) -> Tuple[np.ndarray[np.intp], np.ndarray[np.intp]]:
    ...

@final
def _get_indexer_non_comparable(self, target: Index, method, unique: bool = True):
"""
Expand Down
19 changes: 13 additions & 6 deletions pandas/core/indexes/category.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
Hashable,
List,
Optional,
Tuple,
)
import warnings

Expand Down Expand Up @@ -386,15 +387,17 @@ def unique(self, level=None):
# of result, not self.
return type(self)._simple_new(result, name=self.name)

def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
def reindex(
self, target, method=None, level=None, limit=None, tolerance=None
) -> Tuple[Index, Optional[np.ndarray[np.intp]]]:
"""
Create index with target's values (move/add/delete values as necessary)

Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
indexer : np.ndarray[np.intp] or None
Indices of output values in original index

"""
Expand Down Expand Up @@ -484,19 +487,23 @@ def _maybe_cast_indexer(self, key) -> int:

def _get_indexer(
self, target: Index, method=None, limit=None, tolerance=None
) -> np.ndarray:
) -> np.ndarray[np.intp]:

if self.equals(target):
return np.arange(len(self), dtype="intp")

return self._get_indexer_non_unique(target._values)[0]

@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
def get_indexer_non_unique(
self, target
) -> Tuple[np.ndarray[np.intp], np.ndarray[np.intp]]:
target = ibase.ensure_index(target)
return self._get_indexer_non_unique(target._values)

def _get_indexer_non_unique(self, values: ArrayLike):
def _get_indexer_non_unique(
self, values: ArrayLike
) -> Tuple[np.ndarray[np.intp], np.ndarray[np.intp]]:
"""
get_indexer_non_unique but after unwrapping the target Index object.
"""
Expand All @@ -515,7 +522,7 @@ def _get_indexer_non_unique(self, values: ArrayLike):
codes = self.categories.get_indexer(values)

indexer, missing = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer), missing
return ensure_platform_int(indexer), ensure_platform_int(missing)

@doc(Index._convert_list_indexer)
def _convert_list_indexer(self, keyarr):
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/indexes/datetimelike.py
Original file line number Diff line number Diff line change
Expand Up @@ -828,7 +828,7 @@ def _union(self, other, sort):

def join(
self,
other,
other: Index,
how: str = "left",
level=None,
return_indexers: bool = False,
Expand Down
10 changes: 7 additions & 3 deletions pandas/core/indexes/interval.py
Original file line number Diff line number Diff line change
Expand Up @@ -700,7 +700,7 @@ def _get_indexer(
method: Optional[str] = None,
limit: Optional[int] = None,
tolerance: Optional[Any] = None,
) -> np.ndarray:
) -> np.ndarray[np.intp]:

if isinstance(target, IntervalIndex):
# equal indexes -> 1:1 positional match
Expand Down Expand Up @@ -732,7 +732,9 @@ def _get_indexer(
return ensure_platform_int(indexer)

@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target: Index) -> Tuple[np.ndarray, np.ndarray]:
def get_indexer_non_unique(
self, target: Index
) -> Tuple[np.ndarray[np.intp], np.ndarray[np.intp]]:
target = ensure_index(target)

if isinstance(target, IntervalIndex) and not self._should_compare(target):
Expand All @@ -751,7 +753,9 @@ def get_indexer_non_unique(self, target: Index) -> Tuple[np.ndarray, np.ndarray]

return ensure_platform_int(indexer), ensure_platform_int(missing)

def _get_indexer_pointwise(self, target: Index) -> Tuple[np.ndarray, np.ndarray]:
def _get_indexer_pointwise(
self, target: Index
) -> Tuple[np.ndarray[np.intp], np.ndarray[np.intp]]:
"""
pointwise implementation for get_indexer and get_indexer_non_unique.
"""
Expand Down
8 changes: 5 additions & 3 deletions pandas/core/indexes/multi.py
Original file line number Diff line number Diff line change
Expand Up @@ -2509,15 +2509,17 @@ def sortlevel(

return new_index, indexer

def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
def reindex(
self, target, method=None, level=None, limit=None, tolerance=None
) -> Tuple[Index, Optional[np.ndarray[np.intp]]]:
"""
Create index with target's values (move/add/delete values as necessary)

Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray or None
indexer : np.ndarray[np.intp] or None
Indices of output values in original index.

"""
Expand Down Expand Up @@ -2672,7 +2674,7 @@ def _get_partial_string_timestamp_match_key(self, key):

def _get_indexer(
self, target: Index, method=None, limit=None, tolerance=None
) -> np.ndarray:
) -> np.ndarray[np.intp]:

# empty indexer
if not len(target):
Expand Down
4 changes: 3 additions & 1 deletion pandas/core/indexes/range.py
Original file line number Diff line number Diff line change
Expand Up @@ -395,7 +395,9 @@ def get_loc(self, key, method=None, tolerance=None):
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)

def _get_indexer(self, target: Index, method=None, limit=None, tolerance=None):
def _get_indexer(
self, target: Index, method=None, limit=None, tolerance=None
) -> np.ndarray[np.intp]:
if com.any_not_none(method, tolerance, limit):
return super()._get_indexer(
target, method=method, tolerance=tolerance, limit=limit
Expand Down
Loading