TYP: indexes #40744

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged Apr 13, 2021 · 17 commits
Changes from all commits
12 changes: 6 additions & 6 deletions pandas/core/arrays/datetimes.py
@@ -190,7 +190,7 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
     _infer_matches = ("datetime", "datetime64", "date")

     # define my properties & methods for delegation
-    _bool_ops = [
+    _bool_ops: list[str] = [
         "is_month_start",
         "is_month_end",
         "is_quarter_start",
@@ -199,8 +199,8 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
         "is_year_end",
         "is_leap_year",
     ]
-    _object_ops = ["freq", "tz"]
-    _field_ops = [
+    _object_ops: list[str] = ["freq", "tz"]
+    _field_ops: list[str] = [
         "year",
         "month",
         "day",
@@ -220,9 +220,9 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
         "microsecond",
         "nanosecond",
     ]
-    _other_ops = ["date", "time", "timetz"]
-    _datetimelike_ops = _field_ops + _object_ops + _bool_ops + _other_ops
-    _datetimelike_methods = [
+    _other_ops: list[str] = ["date", "time", "timetz"]
+    _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
+    _datetimelike_methods: list[str] = [
         "to_period",
         "tz_localize",
         "tz_convert",
2 changes: 1 addition & 1 deletion pandas/core/arrays/interval.py
@@ -87,7 +87,7 @@

 IntervalArrayT = TypeVar("IntervalArrayT", bound="IntervalArray")

-_interval_shared_docs = {}
+_interval_shared_docs: dict[str, str] = {}

 _shared_docs_kwargs = {
     "klass": "IntervalArray",
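
Note: annotating the empty dict is the usual fix for mypy's var-annotated error, since an empty literal gives the checker nothing to infer from. A minimal standalone sketch (the shared_docs name here is hypothetical):

from __future__ import annotations

# Without the annotation, mypy has nothing to infer from the empty literal:
# shared_docs = {}   # error: Need type annotation for "shared_docs"

shared_docs: dict[str, str] = {}          # key and value types are now checked
shared_docs["klass"] = "IntervalArray"    # OK
# shared_docs[0] = "oops"                 # error: invalid key type "int"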
6 changes: 3 additions & 3 deletions pandas/core/arrays/period.py
@@ -157,9 +157,9 @@ class PeriodArray(PeriodMixin, dtl.DatelikeOps):

     # Names others delegate to us
     _other_ops: list[str] = []
-    _bool_ops = ["is_leap_year"]
-    _object_ops = ["start_time", "end_time", "freq"]
-    _field_ops = [
+    _bool_ops: list[str] = ["is_leap_year"]
+    _object_ops: list[str] = ["start_time", "end_time", "freq"]
+    _field_ops: list[str] = [
         "year",
         "month",
         "day",
4 changes: 2 additions & 2 deletions pandas/core/generic.py
@@ -1797,9 +1797,9 @@ def _drop_labels_or_levels(self, keys, axis: int = 0):

         Parameters
         ----------
-        keys: str or list of str
+        keys : str or list of str
             labels or levels to drop
-        axis: int, default 0
+        axis : int, default 0
             Axis that levels are associated with (0 for index, 1 for columns)

         Returns
17 changes: 11 additions & 6 deletions pandas/core/groupby/base.py
@@ -3,8 +3,9 @@
 hold the allowlist of methods that are exposed on the
 SeriesGroupBy and the DataFrameGroupBy objects.
 """
+from __future__ import annotations

 import collections
-from typing import List

 from pandas._typing import final
@@ -19,7 +20,7 @@


 class ShallowMixin(PandasObject):
-    _attributes: List[str] = []
+    _attributes: list[str] = []

     @final
     def _shallow_copy(self, obj, **kwargs):
@@ -39,7 +40,7 @@ class GotItemMixin(PandasObject):
     Provide the groupby facilities to the mixed object.
     """

-    _attributes: List[str]
+    _attributes: list[str]

     @final
     def _gotitem(self, key, ndim, subset=None):
@@ -106,12 +107,16 @@ def _gotitem(self, key, ndim, subset=None):
     | plotting_methods
 )

-series_apply_allowlist = (
+series_apply_allowlist: frozenset[str] = (
     common_apply_allowlist
-    | {"nlargest", "nsmallest", "is_monotonic_increasing", "is_monotonic_decreasing"}
+    | frozenset(
+        {"nlargest", "nsmallest", "is_monotonic_increasing", "is_monotonic_decreasing"}
+    )
 ) | frozenset(["dtype", "unique"])

-dataframe_apply_allowlist = common_apply_allowlist | frozenset(["dtypes", "corrwith"])
+dataframe_apply_allowlist: frozenset[str] = common_apply_allowlist | frozenset(
+    ["dtypes", "corrwith"]
+)

 # cythonized transformations or canned "agg+broadcast", which do not
 # require postprocessing of the result by transform.
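
Note: swapping typing.List for built-in list[str] (and annotating the allowlists as frozenset[str]) is only safe on Python 3.7/3.8 because from __future__ import annotations (PEP 563) turns every annotation into a lazily evaluated string, so the subscript is never executed at runtime. A minimal sketch with a hypothetical class:

from __future__ import annotations  # PEP 563: annotations stored as strings

class ShallowDemo:  # hypothetical stand-in for ShallowMixin
    # Fine even on Python 3.7/3.8: "list[str]" is never evaluated at runtime.
    _attributes: list[str] = []

# The future import only covers annotations; evaluating list[str] directly on
# Python 3.8 still raises TypeError: 'type' object is not subscriptable.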
28 changes: 14 additions & 14 deletions pandas/core/indexes/base.py
@@ -173,15 +173,15 @@

 _unsortable_types = frozenset(("mixed", "mixed-integer"))

-_index_doc_kwargs = {
+_index_doc_kwargs: dict[str, str] = {
     "klass": "Index",
     "inplace": "",
     "target_klass": "Index",
     "raises_section": "",
     "unique": "Index",
     "duplicated": "np.ndarray",
 }
-_index_shared_docs = {}
+_index_shared_docs: dict[str, str] = {}
 str_t = str

@@ -1189,7 +1189,7 @@ def _format_with_header(
         return header + result

     @final
-    def to_native_types(self, slicer=None, **kwargs):
+    def to_native_types(self, slicer=None, **kwargs) -> np.ndarray:
         """
         Format specified values of `self` and return them.

@@ -4390,7 +4390,7 @@ def memory_usage(self, deep: bool = False) -> int:
         return result

     @final
-    def where(self, cond, other=None):
+    def where(self, cond, other=None) -> Index:
         """
         Replace values where the condition is False.

@@ -4606,7 +4606,7 @@ def _can_hold_identifiers_and_holds_name(self, name) -> bool:
             return name in self
         return False

-    def append(self, other):
+    def append(self, other) -> Index:
         """
         Append a collection of Index options together.

@@ -4616,7 +4616,7 @@ def append(self, other):

         Returns
         -------
-        appended : Index
+        Index
         """
         to_concat = [self]

@@ -4846,7 +4846,7 @@ def asof(self, label):
             loc = loc.indices(len(self))[-1]
         return self[loc]

-    def asof_locs(self, where: Index, mask) -> np.ndarray:
+    def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray:
         """
         Return the locations (indices) of labels in the index.

@@ -4863,21 +4863,21 @@ def asof_locs(self, where: Index, mask) -> np.ndarray:
         ----------
         where : Index
             An Index consisting of an array of timestamps.
-        mask : array-like
+        mask : np.ndarray[bool]
             Array of booleans denoting where values in the original
             data are not NA.

         Returns
         -------
-        numpy.ndarray
+        np.ndarray[np.intp]
             An array of locations (indices) of the labels from the Index
             which correspond to the return values of the `asof` function
             for every element in `where`.
         """
         locs = self._values[mask].searchsorted(where._values, side="right")
         locs = np.where(locs > 0, locs - 1, 0)

-        result = np.arange(len(self))[mask].take(locs)
+        result = np.arange(len(self), dtype=np.intp)[mask].take(locs)

         # TODO: overload return type of ExtensionArray.__getitem__
         first_value = cast(Any, self._values[mask.argmax()])
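
Note: the asof_locs body is easier to follow with concrete values. searchsorted(side="right") finds the insertion point after any equal element, so stepping back one position (clipped at zero) yields the last non-NA label at or before each query. A standalone numpy illustration with toy data, not pandas internals:

import numpy as np

index_values = np.array([10, 20, 30, 40])   # stand-in for the index labels
mask = np.array([True, False, True, True])  # True where the data is not NA
where = np.array([25, 35])                  # query "timestamps"

locs = index_values[mask].searchsorted(where, side="right")
locs = np.where(locs > 0, locs - 1, 0)      # step back to the last label <= query

# Map positions within the masked subset back to positions in the full index.
result = np.arange(len(index_values), dtype=np.intp)[mask].take(locs)
print(result)  # [0 2]: labels 10 and 30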
@@ -5050,7 +5050,7 @@ def argsort(self, *args, **kwargs) -> np.ndarray:

         Returns
         -------
-        numpy.ndarray
+        np.ndarray[np.intp]
             Integer indices that would sort the index if used as
             an indexer.

@@ -5838,7 +5838,7 @@ def delete(self, loc) -> Index:
         Returns
         -------
         Index
-            New Index with passed location(-s) deleted.
+            Will be same type as self, except for RangeIndex.

         See Also
         --------
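
Note: the reworded Returns section matches observed behavior; most subclasses round-trip their own type through delete, but a RangeIndex generally cannot stay a range once an interior element is removed. A quick illustration (output as of pandas 1.2):

import pandas as pd

idx = pd.RangeIndex(5)
print(idx.delete(2))   # Int64Index([0, 1, 3, 4], dtype='int64'), not a RangeIndex

cat = pd.CategoricalIndex(list("abca"))
print(type(cat.delete(0)).__name__)  # CategoricalIndex, same type as self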
@@ -6352,8 +6352,8 @@ def _maybe_cast_data_without_dtype(subarr):

     elif inferred == "interval":
         try:
-            data = IntervalArray._from_sequence(subarr, copy=False)
-            return data
+            ia_data = IntervalArray._from_sequence(subarr, copy=False)
+            return ia_data
         except (ValueError, TypeError):
             # GH27172: mixed closed Intervals --> object dtype
             pass
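
Note: the data -> ia_data rename looks like a mypy accommodation rather than a behavior change; mypy binds one type per local name from its first assignment, so rebinding a name that earlier held a plain ndarray to an IntervalArray can trigger an incompatible-assignment error. A contrived sketch of the pattern (hypothetical function, not pandas code):

from __future__ import annotations

import numpy as np

def coerce(values: list[int]):
    arr = np.asarray(values)          # mypy infers arr: np.ndarray
    # arr = {v: v for v in values}    # error: dict not assignable to ndarray
    mapping = {v: v for v in values}  # a fresh name keeps its own precise type
    return arr, mapping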
27 changes: 13 additions & 14 deletions pandas/core/indexes/category.py
@@ -1,8 +1,8 @@
+from __future__ import annotations
+
 from typing import (
     Any,
     Hashable,
-    List,
-    Optional,
 )
 import warnings

@@ -50,7 +50,7 @@
     inherit_names,
 )

-_index_doc_kwargs = dict(ibase._index_doc_kwargs)
+_index_doc_kwargs: dict[str, str] = dict(ibase._index_doc_kwargs)
 _index_doc_kwargs.update({"target_klass": "CategoricalIndex"})

@@ -216,10 +216,10 @@ def __new__(
         data=None,
         categories=None,
         ordered=None,
-        dtype: Optional[Dtype] = None,
-        copy=False,
-        name=None,
-    ):
+        dtype: Dtype | None = None,
+        copy: bool = False,
+        name: Hashable = None,
+    ) -> CategoricalIndex:

         name = maybe_extract_name(name, data, cls)
@@ -239,7 +239,7 @@ def _shallow_copy(
         self,
         values: Categorical,
         name: Hashable = no_default,
-    ):
+    ) -> CategoricalIndex:
         name = self._name if name is no_default else name

         if values is not None:
@@ -349,7 +349,7 @@ def _format_attrs(self):
         attrs.append(("length", len(self)))
         return attrs

-    def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]:
+    def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
         from pandas.io.formats.printing import pprint_thing

         result = [
@@ -422,10 +422,9 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):

         target = ibase.ensure_index(target)

-        missing: List[int]
         if self.equals(target):
             indexer = None
-            missing = []
+            missing = np.array([], dtype=np.intp)
         else:
             indexer, missing = self.get_indexer_non_unique(np.array(target))

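Note: returning np.array([], dtype=np.intp) instead of [] presumably lets both branches produce the same array type as get_indexer_non_unique's missing output, so the standalone missing: List[int] declaration becomes unnecessary. A minimal sketch of the pattern (hypothetical helper):

from __future__ import annotations

import numpy as np

def missing_positions(already_equal: bool) -> np.ndarray:
    if already_equal:
        missing = np.array([], dtype=np.intp)  # same array type as the other branch
    else:
        missing = np.flatnonzero([0, 1, 0]).astype(np.intp)  # stand-in for a real lookup
    return missing  # every path yields np.ndarray, no declaration needed
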
@@ -494,8 +493,8 @@ def _maybe_cast_indexer(self, key) -> int:
     def _get_indexer(
         self,
         target: Index,
-        method: Optional[str] = None,
-        limit: Optional[int] = None,
+        method: str | None = None,
+        limit: int | None = None,
         tolerance=None,
     ) -> np.ndarray:

@@ -626,7 +625,7 @@ def map(self, mapper):
         mapped = self._values.map(mapper)
         return Index(mapped, name=self.name)

-    def _concat(self, to_concat: List[Index], name: Hashable) -> Index:
+    def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
         # if calling index is category, don't check dtype of others
         try:
             codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat])