 8 |  8 |   import re
 9 |  9 |   from textwrap import dedent
10 | 10 |   from typing import (
   | 11 | +     TYPE_CHECKING,
11 | 12 |       Any,
12 | 13 |       Callable,
13 | 14 |       Dict,

18 | 19 |       Sequence,
19 | 20 |       Set,
20 | 21 |       Union,
   | 22 | +     cast,
21 | 23 |   )
22 | 24 |   import warnings
23 | 25 |   import weakref

88 | 90 |   from pandas.io.formats.printing import pprint_thing
89 | 91 |   from pandas.tseries.frequencies import to_offset
90 | 92 |
   | 93 | + if TYPE_CHECKING:
   | 94 | +     from pandas import Series  # noqa: F401
   | 95 | +
91 | 96 |   # goal is to be able to define the docs close to function, while still being
92 | 97 |   # able to share
93 | 98 |   _shared_docs = dict()  # type: Dict[str, str]
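The `if TYPE_CHECKING:` block above is the standard idiom for imports that only the type checker needs: `typing.TYPE_CHECKING` is `False` at runtime, so the import never executes (and cannot introduce an import cycle), and annotations then refer to the name as the quoted forward reference `"Series"`. A minimal sketch of the pattern, with `decimal.Decimal` standing in for `pandas.Series`:

    from typing import TYPE_CHECKING, Union

    if TYPE_CHECKING:
        # Evaluated only by the type checker; TYPE_CHECKING is False at runtime,
        # so the import never executes and cannot create an import cycle.
        from decimal import Decimal  # noqa: F401  (stand-in for pandas' Series)


    def label(value: Union[int, "Decimal"]) -> str:
        # "Decimal" is a forward reference: a string the type checker resolves,
        # but that is never looked up when the function is defined or called.
        return str(value)


    print(label(3))  # runs fine even though Decimal was never imported at runtime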
@@ -458,7 +463,7 @@ def _get_block_manager_axis(cls, axis):
458 | 463 |               return m - axis
459 | 464 |           return axis
460 | 465 |
461 |     | -     def _get_axis_resolvers(self, axis):
    | 466 | +     def _get_axis_resolvers(self, axis: str) -> Dict[str, Union["Series", MultiIndex]]:
462 | 467 |           # index or columns
463 | 468 |           axis_index = getattr(self, axis)
464 | 469 |           d = dict()

@@ -488,8 +493,8 @@ def _get_axis_resolvers(self, axis):
488 | 493 |           d[axis] = dindex
489 | 494 |           return d
490 | 495 |
491 |     | -     def _get_index_resolvers(self):
492 |     | -         d = {}
    | 496 | +     def _get_index_resolvers(self) -> Dict[str, Union["Series", MultiIndex]]:
    | 497 | +         d: Dict[str, Union["Series", MultiIndex]] = {}
493 | 498 |           for axis_name in self._AXIS_ORDERS:
494 | 499 |               d.update(self._get_axis_resolvers(axis_name))
495 | 500 |           return d
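A note on the `d: Dict[str, Union["Series", MultiIndex]] = {}` line: an empty `{}` literal gives mypy nothing to infer the key and value types from (it asks for a type annotation on such variables), so the annotation states them explicitly; the later `self._item_cache: Dict = {}` and `names: List = []` changes in this diff look like the same fix. A tiny illustrative sketch with made-up names:

    from typing import Dict, List, Union

    # Without the annotation mypy cannot infer the value type of an empty dict
    # and reports "need type annotation"; spelling it out lets the later
    # assignments and updates be checked against the intended types.
    metrics: Dict[str, Union[int, List[int]]] = {}
    metrics["rows"] = 3
    metrics["shape"] = [3, 4]
    print(metrics)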
@@ -2057,7 +2062,7 @@ def __setstate__(self, state):
2057 | 2062 |               # old pickling format, for compatibility
2058 | 2063 |               self._unpickle_matrix_compat(state)
2059 | 2064 |
2060 |      | -         self._item_cache = {}
     | 2065 | +         self._item_cache: Dict = {}
2061 | 2066 |
2062 | 2067 |       # ----------------------------------------------------------------------
2063 | 2068 |       # Rendering Methods
@@ -3556,9 +3561,9 @@ class animal locomotion
3556 | 3561 |           loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
3557 | 3562 |
3558 | 3563 |           # create the tuple of the indexer
3559 |      | -         indexer = [slice(None)] * self.ndim
3560 |      | -         indexer[axis] = loc
3561 |      | -         indexer = tuple(indexer)
     | 3564 | +         indexer_ = [slice(None)] * self.ndim
     | 3565 | +         indexer_[axis] = loc
     | 3566 | +         indexer = tuple(indexer_)
3562 | 3567 |
3563 | 3568 |           result = self.iloc[indexer]
3564 | 3569 |           setattr(result, result._get_axis_name(axis), new_ax)
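The `indexer_` rename looks cosmetic, but it is presumably there for the type checker: the old code bound `indexer` first to a list and then to a tuple, and mypy rejects re-assigning a variable to an incompatible type. Splitting the two uses across two names keeps each variable at a single type. A minimal sketch of the idea (not pandas code):

    from typing import List, Tuple, Union

    def make_indexer(ndim: int, axis: int, loc: int) -> Tuple[Union[slice, int], ...]:
        # Build the mutable version under one name...
        indexer_: List[Union[slice, int]] = [slice(None)] * ndim
        indexer_[axis] = loc
        # ...and freeze it under another, so each variable keeps one type.
        indexer = tuple(indexer_)
        return indexer

    print(make_indexer(ndim=2, axis=0, loc=5))  # (5, slice(None, None, None))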
@@ -5598,7 +5603,7 @@ def get_dtype_counts(self):
5598 | 5603 |               stacklevel=2,
5599 | 5604 |           )
5600 | 5605 |
5601 |      | -         from pandas import Series
     | 5606 | +         from pandas import Series  # noqa: F811
5602 | 5607 |
5603 | 5608 |           return Series(self._data.get_dtype_counts())
5604 | 5609 |

@@ -5641,7 +5646,7 @@ def get_ftype_counts(self):
5641 | 5646 |               stacklevel=2,
5642 | 5647 |           )
5643 | 5648 |
5644 |      | -         from pandas import Series
     | 5649 | +         from pandas import Series  # noqa: F811
5645 | 5650 |
5646 | 5651 |           return Series(self._data.get_ftype_counts())
5647 | 5652 |

@@ -5677,7 +5682,7 @@ def dtypes(self):
5677 | 5682 |           string object
5678 | 5683 |           dtype: object
5679 | 5684 |           """
5680 |      | -         from pandas import Series
     | 5685 | +         from pandas import Series  # noqa: F811
5681 | 5686 |
5682 | 5687 |           return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
5683 | 5688 |

@@ -5726,7 +5731,7 @@ def ftypes(self):
5726 | 5731 |               stacklevel=2,
5727 | 5732 |           )
5728 | 5733 |
5729 |      | -         from pandas import Series
     | 5734 | +         from pandas import Series  # noqa: F811
5730 | 5735 |
5731 | 5736 |           return Series(self._data.get_ftypes(), index=self._info_axis, dtype=np.object_)
5732 | 5737 |
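The four hunks above (and the similar ones in `fillna` and `asof` below) are the same mechanical change: with `Series` now imported at module level inside `if TYPE_CHECKING:`, pyflakes sees each existing function-local `from pandas import Series` as a redefinition of that name and reports F811, so the local imports gain `# noqa: F811`. Runtime behaviour is unchanged; the local import is still the one that actually executes. A small sketch of the interaction, again with `Decimal` standing in for `Series`:

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from decimal import Decimal  # noqa: F401  (type-checking-only import)


    def parse(text: str) -> "Decimal":
        # pyflakes does not evaluate TYPE_CHECKING, so it treats this as a
        # redefinition of the module-level name above; hence the noqa: F811.
        from decimal import Decimal  # noqa: F811

        return Decimal(text)


    print(parse("2.50"))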
@@ -5905,10 +5910,10 @@ def astype(self, dtype, copy=True, errors="raise"):
5905 | 5910 |           elif is_extension_array_dtype(dtype) and self.ndim > 1:
5906 | 5911 |               # GH 18099/22869: columnwise conversion to extension dtype
5907 | 5912 |               # GH 24704: use iloc to handle duplicate column names
5908 |      | -             results = (
     | 5913 | +             results = [
5909 | 5914 |                   self.iloc[:, i].astype(dtype, copy=copy)
5910 | 5915 |                   for i in range(len(self.columns))
5911 |      | -             )
     | 5916 | +             ]
5912 | 5917 |
5913 | 5918 |           else:
5914 | 5919 |               # else, only a single dtype is given
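Turning the generator expression into a list comprehension also reads like a typing fix rather than a behaviour change, presumably so that `results` has one consistent type across the branches of `astype`: a variable that holds a `Generator` on one code path and a `List` on another is exactly the kind of inconsistency mypy flags, and a generator additionally supports neither `len()` nor indexing. A deliberately mixed-type sketch of the problem (not pandas code):

    from typing import List

    def squares(values: List[int], lazy: bool) -> List[int]:
        if lazy:
            results = (v * v for v in values)  # inferred as Generator[int, None, None]
        else:
            results = [v * v for v in values]  # mypy: incompatible with the inferred type
        # Using a list comprehension on both paths keeps `results` at one type.
        return list(results)

    print(squares([1, 2, 3], lazy=False))  # [1, 4, 9]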
@@ -6277,7 +6282,7 @@ def fillna(
6277 | 6282 |
6278 | 6283 |               if self.ndim == 1:
6279 | 6284 |                   if isinstance(value, (dict, ABCSeries)):
6280 |      | -                     from pandas import Series
     | 6285 | +                     from pandas import Series  # noqa: F811
6281 | 6286 |
6282 | 6287 |                       value = Series(value)
6283 | 6288 |                   elif not is_list_like(value):
@@ -6681,8 +6686,11 @@ def replace(
6681 | 6686 |                   to_replace = regex
6682 | 6687 |                   regex = True
6683 | 6688 |
6684 |      | -             items = list(to_replace.items())
6685 |      | -             keys, values = zip(*items) if items else ([], [])
     | 6689 | +             items = list(cast(dict, to_replace).items())
     | 6690 | +             if items:
     | 6691 | +                 keys, values = zip(*items)
     | 6692 | +             else:
     | 6693 | +                 keys, values = ([], [])
6686 | 6694 |
6687 | 6695 |               are_mappings = [is_dict_like(v) for v in values]
6688 | 6696 |
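`cast(dict, to_replace)` is a hint for the type checker only: the surrounding code has already established that `to_replace` is dict-like, but its declared type is broader, so the cast narrows it for mypy; at runtime `typing.cast` simply returns its second argument. Splitting the conditional `zip` into an explicit `if`/`else` likewise appears to help mypy give `keys` and `values` a single type on each branch. A small sketch of `cast`:

    from typing import Dict, cast

    def first_key(obj: object) -> str:
        # Suppose earlier logic (like pandas' is_dict_like check) guarantees
        # that `obj` is a dict here, but mypy cannot see that from the declared
        # type.  cast() asserts it for the checker and just returns obj at runtime.
        mapping = cast(Dict[str, int], obj)
        return next(iter(mapping))

    print(first_key({"a": 1, "b": 2}))  # "a"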
@@ -7219,7 +7227,7 @@ def asof(self, where, subset=None):
7219 | 7227 |
7220 | 7228 |               if where < start:
7221 | 7229 |                   if not is_series:
7222 |      | -                     from pandas import Series
     | 7230 | +                     from pandas import Series  # noqa: F811
7223 | 7231 |
7224 | 7232 |                       return Series(index=self.columns, name=where)
7225 | 7233 |                   return np.nan
@@ -10266,7 +10274,7 @@ def describe_1d(data):
10266 | 10274 |
10267 | 10275 |           ldesc = [describe_1d(s) for _, s in data.items()]
10268 | 10276 |           # set a convenient order for rows
10269 |       | -         names = []
      | 10277 | +         names: List = []
10270 | 10278 |           ldesc_indexes = sorted((x.index for x in ldesc), key=len)
10271 | 10279 |           for idxnames in ldesc_indexes:
10272 | 10280 |               for name in idxnames: