diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 4468b5e07cc07..2eb6d08ce22e5 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -261,8 +261,8 @@ fi ### DOCSTRINGS ### if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then - MSG='Validate docstrings (GL03, GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT04, RT05, SA05)' ; echo $MSG - $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT04,RT05,SA05 + MSG='Validate docstrings (GL03, GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA05)' ; echo $MSG + $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA05 RET=$(($RET + $?)) ; echo $MSG "DONE" fi diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index 87d76415a9729..4353e0b3edd08 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -205,6 +205,11 @@ def decorator(accessor): Name under which the accessor should be registered. A warning is issued if this name conflicts with a preexisting attribute. +Returns +------- +callable + A class decorator. + See Also -------- %(others)s diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index d0daaf0dc3400..40d71c0872526 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -619,6 +619,10 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None): When `dtype` is provided, neither `categories` nor `ordered` should be provided. + Returns + ------- + Categorical + Examples -------- >>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True) @@ -756,6 +760,11 @@ def as_ordered(self, inplace=False): inplace : bool, default False Whether or not to set the ordered attribute in-place or return a copy of this categorical with ordered set to True. + + Returns + ------- + Categorical + Ordered Categorical. 
""" inplace = validate_bool_kwarg(inplace, 'inplace') return self.set_ordered(True, inplace=inplace) @@ -769,6 +778,11 @@ def as_unordered(self, inplace=False): inplace : bool, default False Whether or not to set the ordered attribute in-place or return a copy of this categorical with ordered set to False. + + Returns + ------- + Categorical + Unordered Categorical. """ inplace = validate_bool_kwarg(inplace, 'inplace') return self.set_ordered(False, inplace=inplace) diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index c73ac0ab5a543..a746866a9fd74 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -239,6 +239,10 @@ def _from_factorized(cls, values, original): .. versionadded:: 0.23.0 + Returns + ------- + %(klass)s + See Also -------- interval_range : Function to create a fixed frequency IntervalIndex. @@ -383,6 +387,10 @@ def from_arrays(cls, left, right, closed='right', copy=False, dtype=None): ..versionadded:: 0.23.0 + Returns + ------- + %(klass)s + See Also -------- interval_range : Function to create a fixed frequency IntervalIndex. diff --git a/pandas/core/base.py b/pandas/core/base.py index 232962fef7255..72db3cee0e80f 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -656,6 +656,10 @@ class IndexOpsMixin: def transpose(self, *args, **kwargs): """ Return the transpose, which is by definition self. + + Returns + ------- + %(klass)s """ nv.validate_transpose(args, kwargs) return self @@ -696,6 +700,11 @@ def ndim(self): def item(self): """ Return the first element of the underlying data as a python scalar. + + Returns + ------- + scalar + The first element of %(klass)s. """ return self.values.item() @@ -1022,6 +1031,11 @@ def argmax(self, axis=None, skipna=True): Dummy argument for consistency with Series skipna : bool, default True + Returns + ------- + numpy.ndarray + Indices of the maximum values. 
+ See Also -------- numpy.ndarray.argmax @@ -1122,6 +1136,10 @@ def __iter__(self): These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) + + Returns + ------- + iterator """ # We are explicity making element iterators. if is_datetimelike(self._values): diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index a11773f4d6b70..6eca8313e1427 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -27,6 +27,11 @@ def register_extension_dtype(cls): This enables operations like ``.astype(name)`` for the name of the ExtensionDtype. + Returns + ------- + callable + A class decorator. + Examples -------- >>> from pandas.api.extensions import register_extension_dtype diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index 24d4981a7a3f7..63cb4d85ca308 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -435,6 +435,10 @@ def is_hashable(obj): Distinguish between these and other types by trying the call to hash() and seeing if they raise TypeError. + Returns + ------- + bool + Examples -------- >>> a = ([],) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d5842e0eb3458..ac4e1b8f16d69 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -839,12 +839,12 @@ def itertuples(self, index=True, name="Pandas"): The name of the returned namedtuples or None to return regular tuples. - Yields + Returns ------- - collections.namedtuple - Yields a namedtuple for each row in the DataFrame with the first - field possibly being the index and following fields being the - column values. + iterator + An object to iterate over namedtuples for each row in the + DataFrame with the first field possibly being the index and + following fields being the column values. 
See Also -------- @@ -3651,6 +3651,10 @@ def lookup(self, row_labels, col_labels): col_labels : sequence The column labels to use for lookup + Returns + ------- + numpy.ndarray + Notes ----- Akin to:: @@ -6053,6 +6057,11 @@ def unstack(self, level=-1, fill_value=None): col_level : int or string, optional If columns are a MultiIndex then use this level to melt. + Returns + ------- + DataFrame + Unpivoted DataFrame. + See Also -------- %(other)s diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6af5a4d7b9c14..12adc22720654 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1518,6 +1518,11 @@ def bool(self): This must be a boolean scalar value, either True or False. Raise a ValueError if the PandasObject does not have exactly 1 element, or that element is not boolean + + Returns + ------- + bool + Same single boolean value converted to bool type. """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): @@ -1845,7 +1850,14 @@ def __hash__(self): ' hashed'.format(self.__class__.__name__)) def __iter__(self): - """Iterate over info axis""" + """ + Iterate over info axis. + + Returns + ------- + iterator + Info axis as iterator. + """ return iter(self._info_axis) # can we get a better explanation of this? @@ -1853,6 +1865,11 @@ def keys(self): """Get the 'info axis' (see Indexing for more) This is index for Series, columns for DataFrame. + + Returns + ------- + Index + Info axis. """ return self._info_axis @@ -1946,6 +1963,11 @@ def __array_wrap__(self, result, context=None): def to_dense(self): """ Return dense representation of NDFrame (as opposed to sparse). + + Returns + ------- + %(klass)s + Dense %(klass)s. """ # compat return self @@ -2238,6 +2260,12 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None, .. versionadded:: 0.23.0 + Returns + ------- + None or str + If path_or_buf is None, returns the resulting json format as a + string. Otherwise returns None. 
+ See Also -------- read_json @@ -2418,6 +2446,12 @@ def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs): (default is False) compress : type of compressor (zlib or blosc), default to None (no compression) + + Returns + ------- + None or str + If path_or_buf is None, returns the resulting msgpack format as a + string. Otherwise returns None. """ from pandas.io import packers @@ -6167,6 +6201,11 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, def ffill(self, axis=None, inplace=False, limit=None, downcast=None): """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. + + Returns + ------- + %(klass)s + Object with missing values filled. """ return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit, downcast=downcast) @@ -6174,6 +6213,11 @@ def ffill(self, axis=None, inplace=False, limit=None, downcast=None): def bfill(self, axis=None, inplace=False, limit=None, downcast=None): """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. + + Returns + ------- + %(klass)s + Object with missing values filled. """ return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit, downcast=downcast) @@ -9313,6 +9357,8 @@ def tz_convert(self, tz, axis=0, level=None, copy=True): Returns ------- + %(klass)s + Object with time zone converted axis. Raises ------ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index f8b9ddce6000e..21ce0fe1cea08 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -999,6 +999,11 @@ def true_and_notna(x, *args, **kwargs): def nunique(self, dropna=True): """ Return number of unique elements in the group. + + Returns + ------- + Series + Number of unique values within each group. 
""" ids, _, _ = self.grouper.group_info @@ -1181,7 +1186,14 @@ def value_counts(self, normalize=False, sort=True, ascending=False, return Series(out, index=mi, name=self._selection_name) def count(self): - """ Compute count of group, excluding missing values """ + """ + Compute count of group, excluding missing values. + + Returns + ------- + Series + Count of values within each group. + """ ids, _, ngroups = self.grouper.group_info val = self.obj.get_values() @@ -1479,7 +1491,14 @@ def _fill(self, direction, limit=None): return concat((self._wrap_transformed_output(output), res), axis=1) def count(self): - """ Compute count of group, excluding missing values """ + """ + Compute count of group, excluding missing values. + + Returns + ------- + DataFrame + Count of values within each group. + """ from pandas.core.dtypes.missing import _isna_ndarraylike as _isna data, _ = self._get_data_to_aggregate() diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 40f335ad1d6f7..798a65381fe00 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1070,6 +1070,10 @@ def any(self, skipna=True): ---------- skipna : bool, default True Flag to ignore nan values during truth testing + + Returns + ------- + bool """ return self._bool_agg('any', skipna) @@ -1083,6 +1087,10 @@ def all(self, skipna=True): ---------- skipna : bool, default True Flag to ignore nan values during truth testing + + Returns + ------- + bool """ return self._bool_agg('all', skipna) @@ -1091,6 +1099,11 @@ def all(self, skipna=True): def count(self): """ Compute count of group, excluding missing values. + + Returns + ------- + Series or DataFrame + Count of values within each group. """ # defined here for API doc @@ -1157,6 +1170,11 @@ def median(self, **kwargs): Compute median of groups, excluding missing values. 
For multiple groupings, the result index will be a MultiIndex + + Returns + ------- + Series or DataFrame + Median of values within each group. """ try: return self._cython_agg_general('median', **kwargs) @@ -1183,6 +1201,11 @@ def std(self, ddof=1, *args, **kwargs): ---------- ddof : integer, default 1 degrees of freedom + + Returns + ------- + Series or DataFrame + Standard deviation of values within each group. """ # TODO: implement at Cython level? @@ -1201,6 +1224,11 @@ def var(self, ddof=1, *args, **kwargs): ---------- ddof : integer, default 1 degrees of freedom + + Returns + ------- + Series or DataFrame + Variance of values within each group. """ nv.validate_groupby_func('var', args, kwargs) if ddof == 1: @@ -1227,6 +1255,11 @@ def sem(self, ddof=1): ---------- ddof : integer, default 1 degrees of freedom + + Returns + ------- + Series or DataFrame + Standard error of the mean of values within each group. """ return self.std(ddof=ddof) / np.sqrt(self.count()) @@ -1236,6 +1269,11 @@ def sem(self, ddof=1): def size(self): """ Compute group sizes. + + Returns + ------- + Series + Number of rows in each group. """ result = self.grouper.size() @@ -1253,7 +1291,14 @@ def groupby_function(name, alias, npfunc, numeric_only=True, _convert=False, min_count=-1): - _local_template = "Compute %(f)s of group values" + _local_template = """ + Compute %(f)s of group values. + + Returns + ------- + Series or DataFrame + Computed %(f)s of values within each group. + """ @Substitution(name='groupby', f=name) @Appender(_common_see_also) @@ -1326,6 +1371,11 @@ def ohlc(self): Compute sum of values, excluding missing values. For multiple groupings, the result index will be a MultiIndex + + Returns + ------- + DataFrame + Open, high, low and close values within each group. 
""" return self._apply_to_column_groupbys( @@ -1510,6 +1560,11 @@ def pad(self, limit=None): limit : integer, optional limit of how many values to fill + Returns + ------- + Series or DataFrame + Object with missing values filled. + See Also -------- Series.pad @@ -1530,6 +1585,11 @@ def backfill(self, limit=None): limit : integer, optional limit of how many values to fill + Returns + ------- + Series or DataFrame + Object with missing values filled. + See Also -------- Series.backfill @@ -1560,6 +1620,11 @@ def nth(self, dropna : None or str, optional apply the specified dropna operation before counting which row is the nth row. Needs to be None, 'any' or 'all' + + Returns + ------- + Series or DataFrame + N-th value within each group. %(see_also)s Examples -------- @@ -1800,6 +1865,11 @@ def ngroup(self, ascending=True): ascending : bool, default True If False, number in reverse, from number of group - 1 to 0. + Returns + ------- + Series + Unique numbers for each group. + See Also -------- .cumcount : Number the rows in each group. @@ -1863,6 +1933,11 @@ def cumcount(self, ascending=True): ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. + Returns + ------- + Series + Sequence number of each element within each group. + See Also -------- .ngroup : Number the groups themselves. @@ -1945,6 +2020,10 @@ def rank(self, method='average', ascending=True, na_option='keep', def cumprod(self, axis=0, *args, **kwargs): """ Cumulative product for each group. + + Returns + ------- + Series or DataFrame """ nv.validate_groupby_func('cumprod', args, kwargs, ['numeric_only', 'skipna']) @@ -1958,6 +2037,10 @@ def cumprod(self, axis=0, *args, **kwargs): def cumsum(self, axis=0, *args, **kwargs): """ Cumulative sum for each group. 
+ + Returns + ------- + Series or DataFrame """ nv.validate_groupby_func('cumsum', args, kwargs, ['numeric_only', 'skipna']) @@ -1971,6 +2054,10 @@ def cumsum(self, axis=0, *args, **kwargs): def cummin(self, axis=0, **kwargs): """ Cumulative min for each group. + + Returns + ------- + Series or DataFrame """ if axis != 0: return self.apply(lambda x: np.minimum.accumulate(x, axis)) @@ -1982,6 +2069,10 @@ def cummin(self, axis=0, **kwargs): def cummax(self, axis=0, **kwargs): """ Cumulative max for each group. + + Returns + ------- + Series or DataFrame """ if axis != 0: return self.apply(lambda x: np.maximum.accumulate(x, axis)) @@ -2109,6 +2200,11 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None): fill_value : optional .. versionadded:: 0.24.0 + + Returns + ------- + Series or DataFrame + Object shifted within each group. """ if freq is not None or axis != 0 or not isna(fill_value): @@ -2127,6 +2223,11 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, axis=0): """ Calculate pct_change of each value to previous entry in group. + + Returns + ------- + Series or DataFrame + Percentage changes within each group. """ if freq is not None or axis != 0: return self.apply(lambda x: x.pct_change(periods=periods, @@ -2147,6 +2248,10 @@ def head(self, n=5): Essentially equivalent to ``.apply(lambda x: x.head(n))``, except ignores as_index flag. + + Returns + ------- + Series or DataFrame %(see_also)s Examples -------- @@ -2174,6 +2279,10 @@ def tail(self, n=5): Essentially equivalent to ``.apply(lambda x: x.tail(n))``, except ignores as_index flag. + + Returns + ------- + Series or DataFrame %(see_also)s Examples -------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 6bb8f299e811f..8f0bf668e59ac 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -692,6 +692,11 @@ def ravel(self, order='C'): """ Return an ndarray of the flattened values of the underlying data. 
+ Returns + ------- + numpy.ndarray + Flattened array. + See Also -------- numpy.ndarray.ravel @@ -728,6 +733,11 @@ def view(self, cls=None): or the original Index is returned. .. versionadded:: 0.19.0 + + Returns + ------- + Index + Index with values cast to specified dtype. """ @Appender(_index_shared_docs['astype']) @@ -777,6 +787,11 @@ def astype(self, dtype, copy=True): If allow_fill=True and fill_value is not None, indices specified by -1 is regarded as NA. If Index doesn't hold NA, raise ValueError + Returns + ------- + numpy.ndarray + Elements of given indices. + See Also -------- numpy.ndarray.take @@ -1038,6 +1053,11 @@ def to_native_types(self, slicer=None, **kwargs): Whether or not there are quoted values in `self` 3) date_format : str The format used to represent date-like values + + Returns + ------- + numpy.ndarray + Formatted values. """ values = self @@ -3716,6 +3736,10 @@ def memory_usage(self, deep=False): ---------- cond : boolean array-like with the same length as self other : scalar, or array-like + + Returns + ------- + Index """ @Appender(_index_shared_docs['where']) @@ -4026,6 +4050,10 @@ def putmask(self, mask, value): """ Return a new Index of the values set with the mask. + Returns + ------- + Index + See Also -------- numpy.ndarray.putmask @@ -4044,6 +4072,11 @@ def putmask(self, mask, value): def equals(self, other): """ Determine if two Index objects contain the same elements. + + Returns + ------- + bool + If two Index objects have equal elements True, otherwise False. """ if self.is_(other): return True @@ -4065,6 +4098,12 @@ def identical(self, other): """ Similar to equals, but check that other comparable attributes are also equal. + + Returns + ------- + bool + If two Index objects have equal elements and same type True, + otherwise False. 
""" return (self.equals(other) and all((getattr(self, c, None) == getattr(other, c, None) @@ -4336,6 +4375,11 @@ def get_value(self, series, key): """ Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing. + + Returns + ------- + scalar + A value in the Series with the index of the key value in self. """ # if we have something that is Index-like, then @@ -4443,6 +4487,11 @@ def get_indexer_for(self, target, **kwargs): This dispatches to get_indexer or get_indexer_nonunique as appropriate. + + Returns + ------- + numpy.ndarray + List of indices. """ if self.is_unique: return self.get_indexer(target, **kwargs) @@ -4775,6 +4824,11 @@ def get_slice_bound(self, label, side, kind): label : object side : {'left', 'right'} kind : {'ix', 'loc', 'getitem'} + + Returns + ------- + int + Index of label. """ assert kind in ['ix', 'loc', 'getitem', None] diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 473686a7541a5..9bb1ec0e960f4 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -284,6 +284,12 @@ def _is_dtype_compat(self, other): def equals(self, other): """ Determine if two CategorialIndex objects contain the same elements. + + Returns + ------- + bool + If two CategorialIndex objects have equal elements True, + otherwise False. 
""" if self.is_(other): return True diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index dba7dae2251be..8dd75e1dfe69b 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -712,6 +712,10 @@ def to_series(self, keep_tz=None, index=None, name=None): def snap(self, freq='S'): """ Snap time stamps to nearest occurring frequency + + Returns + ------- + DatetimeIndex """ # Superdumb, punting on any optimizing freq = to_offset(freq) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index dd3ac02805c8e..1b60f9b21bbd1 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1562,6 +1562,10 @@ def is_all_dates(self): def is_lexsorted(self): """ Return True if the codes are lexicographically sorted + + Returns + ------- + bool """ return self.lexsort_depth == self.nlevels @@ -2004,6 +2008,10 @@ def reorder_levels(self, order): Parameters ---------- + + Returns + ------- + MultiIndex """ order = [self._get_level_number(i) for i in order] if len(order) != self.nlevels: diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 7ba3b826e920f..e969c4b695d30 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -125,7 +125,13 @@ def ensure_int(value, field): @classmethod def from_range(cls, data, name=None, dtype=None, **kwargs): - """ Create RangeIndex from a range object. """ + """ + Create RangeIndex from a range object. + + Returns + ------- + RangeIndex + """ if not isinstance(data, range): raise TypeError( '{0}(...) must be called with object coercible to a ' diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 4edcdd2dc1060..b2d30b5f34a75 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -780,6 +780,11 @@ def asfreq(self, fill_value=None): .. versionadded:: 0.20.0 + Returns + ------- + DataFrame or Series + Values at the specified freq. 
+ See Also -------- Series.asfreq @@ -795,6 +800,11 @@ def std(self, ddof=1, *args, **kwargs): ---------- ddof : integer, default 1 Degrees of freedom. + + Returns + ------- + DataFrame or Series + Standard deviation of values within each group. """ nv.validate_resampler_func('std', args, kwargs) return self._downsample('std', ddof=ddof) @@ -807,6 +817,11 @@ def var(self, ddof=1, *args, **kwargs): ---------- ddof : integer, default 1 degrees of freedom + + Returns + ------- + DataFrame or Series + Variance of values within each group. """ nv.validate_resampler_func('var', args, kwargs) return self._downsample('var', ddof=ddof) @@ -830,6 +845,11 @@ def quantile(self, q=0.5, **kwargs): ---------- q : float or array-like, default 0.5 (50% quantile) + Returns + ------- + DataFrame or Series + Quantile of values within each group. + See Also -------- Series.quantile diff --git a/pandas/core/series.py b/pandas/core/series.py index 25e8006b1b89f..e23d0bf6a5b83 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -319,6 +319,11 @@ def from_array(cls, arr, index=None, name=None, dtype=None, copy=False, .. deprecated :: 0.23.0 Use pd.Series(..) constructor instead. + + Returns + ------- + Series + Constructed Series. """ warnings.warn("'from_array' is deprecated and will be removed in a " "future version. Please use the pd.Series(..) " @@ -486,6 +491,11 @@ def _formatting_values(self): def get_values(self): """ Same as values (but handles sparseness conversions); is a view. + + Returns + ------- + numpy.ndarray + Data of the Series. """ return self._data.get_values() @@ -526,6 +536,11 @@ def compress(self, condition, *args, **kwargs): .. deprecated:: 0.24.0 + Returns + ------- + Series + Series without the slices for which condition is false. + See Also -------- numpy.ndarray.compress @@ -550,6 +565,11 @@ def nonzero(self): but it will always be a one-item tuple because series only have one dimension. 
+ Returns + ------- + numpy.ndarray + Indices of elements that are non-zero. + See Also -------- numpy.nonzero @@ -1479,6 +1499,11 @@ def iteritems(self): def keys(self): """ Return alias for index. + + Returns + ------- + Index + Index of the Series. """ return self.index @@ -3942,6 +3967,11 @@ def reindex_axis(self, labels, axis=0, **kwargs): .. deprecated:: 0.21.0 Use ``Series.reindex`` instead. + + Returns + ------- + Series + Reindexed Series. """ # for compatibility with higher dims if axis != 0: @@ -4405,6 +4435,11 @@ def valid(self, inplace=False, **kwargs): .. deprecated:: 0.23.0 Use :meth:`Series.dropna` instead. + + Returns + ------- + Series + Series without null values. """ warnings.warn("Method .valid will be removed in a future version. " "Use .dropna instead.", FutureWarning, stacklevel=2) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index a41e97831d104..8aad47c231c70 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -827,6 +827,11 @@ def parse(self, Equivalent to read_excel(ExcelFile, ...) See the read_excel docstring for more info on accepted parameters + + Returns + ------- + DataFrame or dict of DataFrames + DataFrame from the passed in Excel file. """ # Can't use _deprecate_kwarg since sheetname=None has a special meaning diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 55dee4d065392..95c9a2ab63159 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -534,6 +534,10 @@ def keys(self): Return a (potentially unordered) list of the keys corresponding to the objects stored in the HDFStore. These are ABSOLUTE path-names (e.g. 
have the leading '/' + + Returns + ------- + list """ return [n._v_pathname for n in self.groups()] @@ -1079,6 +1083,10 @@ def create_table_index(self, key, **kwargs): def groups(self): """return a list of all the top-level nodes (that are not themselves a pandas storage object) + + Returns + ------- + list """ _tables() self._check_if_open() @@ -1213,6 +1221,10 @@ def info(self): Print detailed information on the store. .. versionadded:: 0.21.0 + + Returns + ------- + str """ output = '{type}\nFile path: {path}\n'.format( type=type(self), path=pprint_thing(self._path)) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 7db614cc6a6ac..0cbde6d0d85b2 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1750,6 +1750,10 @@ def variable_labels(self): """ Return variable labels as a dict, associating each variable name with corresponding label. + + Returns + ------- + dict """ return dict(zip(self.varlist, self._variable_labels)) @@ -1757,6 +1761,10 @@ def value_labels(self): """ Return a dict, associating each variable name a dict, associating each value its corresponding label. + + Returns + ------- + dict """ if not self._value_labels_read: self._read_value_labels() diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 831694a3eba9c..3a674303de984 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2479,6 +2479,11 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, `**kwds` : keywords To be passed to the actual plotting function + Returns + ------- + matplotlib.AxesSubplot + A histogram plot. + See Also -------- matplotlib.axes.Axes.hist : Plot a histogram using matplotlib. 
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index cefc4d8aca4f2..b361ccec0e867 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -44,6 +44,11 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False, kwds : other plotting keyword arguments To be passed to scatter function + Returns + ------- + numpy.ndarray + A matrix of scatter plots. + Examples -------- >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D']) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index c436c99643341..9900ad1a1f4af 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -333,6 +333,11 @@ def name(self): def rollback(self, dt): """ Roll provided date backward to next offset only if not on offset. + + Returns + ------- + Timestamp + Rolled timestamp if not on offset, otherwise unchanged timestamp. """ dt = as_timestamp(dt) if not self.onOffset(dt): @@ -342,6 +347,11 @@ def rollback(self, dt): def rollforward(self, dt): """ Roll provided date forward to next offset only if not on offset. + + Returns + ------- + Timestamp + Rolled timestamp if not on offset, otherwise unchanged timestamp. """ dt = as_timestamp(dt) if not self.onOffset(dt):