Skip to content

CLN: update Appender to doc decorator for cases using __doc__ (#33112)

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 6 commits on Mar 30, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions pandas/core/groupby/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@

from pandas._libs import Timestamp, lib
from pandas._typing import FrameOrSeries
from pandas.util._decorators import Appender, Substitution
from pandas.util._decorators import Appender, Substitution, doc

from pandas.core.dtypes.cast import (
maybe_cast_result,
Expand Down Expand Up @@ -633,7 +633,7 @@ def nunique(self, dropna: bool = True) -> Series:
result = Series(res, index=ri, name=self._selection_name)
return self._reindex_output(result, fill_value=0)

@Appender(Series.describe.__doc__)
@doc(Series.describe)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
Expand Down
6 changes: 3 additions & 3 deletions pandas/core/groupby/groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ class providing the base-class of operations.
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc

from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
Expand Down Expand Up @@ -1420,7 +1420,7 @@ def ohlc(self) -> DataFrame:
"""
return self._apply_to_column_groupbys(lambda x: x._cython_agg_general("ohlc"))

@Appender(DataFrame.describe.__doc__)
@doc(DataFrame.describe)
def describe(self, **kwargs):
with _group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
Expand Down Expand Up @@ -2509,7 +2509,7 @@ def _reindex_output(
GroupBy._add_numeric_operations()


@Appender(GroupBy.__doc__)
@doc(GroupBy)
def get_groupby(
obj: NDFrame,
by: Optional[_KeysArgType] = None,
Expand Down
6 changes: 3 additions & 3 deletions pandas/core/indexes/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
from pandas._typing import Label
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc

from pandas.core.dtypes import concat as _concat
from pandas.core.dtypes.cast import (
Expand Down Expand Up @@ -3835,7 +3835,7 @@ def values(self) -> np.ndarray:
return self._data.view(np.ndarray)

@cache_readonly
@Appender(IndexOpsMixin.array.__doc__) # type: ignore
@doc(IndexOpsMixin.array) # type: ignore
def array(self) -> ExtensionArray:
array = self._data
if isinstance(array, np.ndarray):
Expand Down Expand Up @@ -3876,7 +3876,7 @@ def _get_engine_target(self) -> np.ndarray:
"""
return self._values

@Appender(IndexOpsMixin.memory_usage.__doc__)
@doc(IndexOpsMixin.memory_usage)
def memory_usage(self, deep: bool = False) -> int:
result = super().memory_usage(deep=deep)

Expand Down
24 changes: 12 additions & 12 deletions pandas/core/indexes/category.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
from pandas._libs.hashtable import duplicated_int64
from pandas._libs.lib import no_default
from pandas._typing import Label
from pandas.util._decorators import Appender, cache_readonly
from pandas.util._decorators import Appender, cache_readonly, doc

from pandas.core.dtypes.common import (
ensure_platform_int,
Expand Down Expand Up @@ -241,7 +241,7 @@ def _simple_new(cls, values: Categorical, name: Label = None):

# --------------------------------------------------------------------

@Appender(Index._shallow_copy.__doc__)
@doc(Index._shallow_copy)
def _shallow_copy(self, values=None, name: Label = no_default):
name = self.name if name is no_default else name

Expand Down Expand Up @@ -354,7 +354,7 @@ def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True

@Appender(Index.__contains__.__doc__)
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
# if key is a NaN, check if any NaN is in self.
if is_scalar(key) and isna(key):
Expand All @@ -363,7 +363,7 @@ def __contains__(self, key: Any) -> bool:
hash(key)
return contains(self, key, container=self._engine)

@Appender(Index.astype.__doc__)
@doc(Index.astype)
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
from pandas import IntervalIndex
Expand All @@ -382,7 +382,7 @@ def _isnan(self):
""" return if each value is nan"""
return self._data.codes == -1

@Appender(Index.fillna.__doc__)
@doc(Index.fillna)
def fillna(self, value, downcast=None):
self._assert_can_do_op(value)
return CategoricalIndex(self._data.fillna(value), name=self.name)
Expand All @@ -395,7 +395,7 @@ def _engine(self):
codes = self.codes
return self._engine_type(lambda: codes, len(self))

@Appender(Index.unique.__doc__)
@doc(Index.unique)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
Expand All @@ -404,7 +404,7 @@ def unique(self, level=None):
# of result, not self.
return type(self)._simple_new(result, name=self.name)

@Appender(Index.duplicated.__doc__)
@doc(Index.duplicated)
def duplicated(self, keep="first"):
codes = self.codes.astype("i8")
return duplicated_int64(codes, keep)
Expand All @@ -418,7 +418,7 @@ def _maybe_cast_indexer(self, key):
code = self.codes.dtype.type(code)
return code

@Appender(Index.where.__doc__)
@doc(Index.where)
def where(self, cond, other=None):
# TODO: Investigate an alternative implementation with
# 1. copy the underlying Categorical
Expand Down Expand Up @@ -569,7 +569,7 @@ def get_indexer_non_unique(self, target):
indexer, missing = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer), missing

@Appender(Index._convert_list_indexer.__doc__)
@doc(Index._convert_list_indexer)
def _convert_list_indexer(self, keyarr):
# Return our indexer or raise if all of the values are not included in
# the categories
Expand All @@ -586,7 +586,7 @@ def _convert_list_indexer(self, keyarr):

return self.get_indexer(keyarr)

@Appender(Index._convert_arr_indexer.__doc__)
@doc(Index._convert_arr_indexer)
def _convert_arr_indexer(self, keyarr):
keyarr = com.asarray_tuplesafe(keyarr)

Expand All @@ -595,7 +595,7 @@ def _convert_arr_indexer(self, keyarr):

return self._shallow_copy(keyarr)

@Appender(Index._convert_index_indexer.__doc__)
@doc(Index._convert_index_indexer)
def _convert_index_indexer(self, keyarr):
return self._shallow_copy(keyarr)

Expand All @@ -608,7 +608,7 @@ def take_nd(self, *args, **kwargs):
)
return self.take(*args, **kwargs)

@Appender(Index._maybe_cast_slice_bound.__doc__)
@doc(Index._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side, kind):
if kind == "loc":
return label
Expand Down