Skip to content

CLN: collected cleanups #42827

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit on
Jul 30, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions pandas/core/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -1221,7 +1221,7 @@ def factorize(self, sort: bool = False, na_sentinel: int | None = -1):
"""

@doc(_shared_docs["searchsorted"], klass="Index")
def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
def searchsorted(self, value, side="left", sorter=None) -> npt.NDArray[np.intp]:
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)

def drop_duplicates(self, keep="first"):
Expand All @@ -1232,5 +1232,5 @@ def drop_duplicates(self, keep="first"):
@final
def _duplicated(
self, keep: Literal["first", "last", False] = "first"
) -> np.ndarray:
) -> npt.NDArray[np.bool_]:
return duplicated(self._values, keep=keep)
2 changes: 1 addition & 1 deletion pandas/core/construction.py
Original file line number Diff line number Diff line change
Expand Up @@ -402,7 +402,7 @@ def extract_array(
>>> extract_array([1, 2, 3])
[1, 2, 3]

For an ndarray-backed Series / Index a PandasArray is returned.
For an ndarray-backed Series / Index the ndarray is returned.

>>> extract_array(pd.Series([1, 2, 3]))
array([1, 2, 3])
Expand Down
3 changes: 2 additions & 1 deletion pandas/core/indexes/datetimelike.py
Original file line number Diff line number Diff line change
Expand Up @@ -238,10 +238,11 @@ def _format_attrs(self):
"""
attrs = super()._format_attrs()
for attrib in self._attributes:
# iterating over _attributes prevents us from doing this for PeriodIndex
if attrib == "freq":
freq = self.freqstr
if freq is not None:
freq = repr(freq)
freq = repr(freq) # e.g. D -> 'D'
# Argument 1 to "append" of "list" has incompatible type
# "Tuple[str, Optional[str]]"; expected "Tuple[str, Union[str, int]]"
attrs.append(("freq", freq)) # type: ignore[arg-type]
Expand Down
24 changes: 11 additions & 13 deletions pandas/core/indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -642,7 +642,7 @@ def _get_setitem_indexer(self, key):
self._ensure_listlike_indexer(key)

if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
return self._convert_tuple(key)

ax = self.obj._get_axis(0)

Expand All @@ -653,12 +653,12 @@ def _get_setitem_indexer(self, key):

if isinstance(key, tuple):
with suppress(IndexingError):
return self._convert_tuple(key, is_setter=True)
return self._convert_tuple(key)

if isinstance(key, range):
return list(key)

return self._convert_to_indexer(key, axis=0, is_setter=True)
return self._convert_to_indexer(key, axis=0)

def _ensure_listlike_indexer(self, key, axis=None, value=None):
"""
Expand Down Expand Up @@ -755,21 +755,19 @@ def _is_nested_tuple_indexer(self, tup: tuple) -> bool:
return any(is_nested_tuple(tup, ax) for ax in self.obj.axes)
return False

def _convert_tuple(self, key, is_setter: bool = False):
def _convert_tuple(self, key):
keyidx = []
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
for i in range(self.ndim):
if i == axis:
keyidx.append(
self._convert_to_indexer(key, axis=axis, is_setter=is_setter)
)
keyidx.append(self._convert_to_indexer(key, axis=axis))
else:
keyidx.append(slice(None))
else:
self._validate_key_length(key)
for i, k in enumerate(key):
idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
idx = self._convert_to_indexer(k, axis=i)
keyidx.append(idx)

return tuple(keyidx)
Expand Down Expand Up @@ -867,8 +865,8 @@ def _getitem_nested_tuple(self, tup: tuple):
# a tuple passed to a series with a multi-index
if len(tup) > self.ndim:
if self.name != "loc":
# This should never be reached, but lets be explicit about it
raise ValueError("Too many indices")
# This should never be reached, but let's be explicit about it
raise ValueError("Too many indices") # pragma: no cover
if all(is_hashable(x) or com.is_null_slice(x) for x in tup):
# GH#10521 Series should reduce MultiIndex dimensions instead of
# DataFrame, IndexingError is not raised when slice(None,None,None)
Expand Down Expand Up @@ -911,7 +909,7 @@ def _getitem_nested_tuple(self, tup: tuple):

return obj

def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
def _convert_to_indexer(self, key, axis: int):
raise AbstractMethodError(self)

def __getitem__(self, key):
Expand Down Expand Up @@ -1176,7 +1174,7 @@ def _get_slice_axis(self, slice_obj: slice, axis: int):
# return a DatetimeIndex instead of a slice object.
return self.obj.take(indexer, axis=axis)

def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
def _convert_to_indexer(self, key, axis: int):
"""
Convert indexing key into something we can use to do actual fancy
indexing on a ndarray.
Expand Down Expand Up @@ -1486,7 +1484,7 @@ def _get_slice_axis(self, slice_obj: slice, axis: int):
labels._validate_positional_slice(slice_obj)
return self.obj._slice(slice_obj, axis=axis)

def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
def _convert_to_indexer(self, key, axis: int):
"""
Much simpler as we only have to deal with our valid types.
"""
Expand Down
1 change: 1 addition & 0 deletions pandas/core/internals/concat.py
Original file line number Diff line number Diff line change
Expand Up @@ -672,6 +672,7 @@ def _combine_concat_plans(plans, concat_axis: int):
offset += last_plc.as_slice.stop

else:
# singleton list so we can modify it as a side-effect within _next_or_none
num_ended = [0]

def _next_or_none(seq):
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/series.py
Original file line number Diff line number Diff line change
Expand Up @@ -4386,7 +4386,7 @@ def _reduce(
return op(delegate, skipna=skipna, **kwds)

def _reindex_indexer(
self, new_index: Index | None, indexer: np.ndarray | None, copy: bool
self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool
) -> Series:
# Note: new_index is None iff indexer is None
# if not None, indexer is np.intp
Expand Down