From 89e5b225a8dec9cf2ad32246683e3dfd07573480 Mon Sep 17 00:00:00 2001 From: ihsan Date: Sat, 27 Apr 2019 23:20:45 +0300 Subject: [PATCH 01/19] DOC: Fix validation error RT01 in pandas/core (#25356) Except errors in accessor.py, op.py and some errors in resample.py, frame.py --- pandas/core/base.py | 18 ++++++++++++++ pandas/core/frame.py | 14 +++++++++++ pandas/core/generic.py | 55 ++++++++++++++++++++++++++++++++++++++++- pandas/core/resample.py | 26 +++++++++++++++++++ pandas/core/series.py | 35 ++++++++++++++++++++++++++ 5 files changed, 147 insertions(+), 1 deletion(-) diff --git a/pandas/core/base.py b/pandas/core/base.py index 18ced05d00f25..6002f695a5c25 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -656,6 +656,10 @@ class IndexOpsMixin: def transpose(self, *args, **kwargs): """ Return the transpose, which is by definition self. + + Returns + ------- + self: %(klass)s """ nv.validate_transpose(args, kwargs) return self @@ -696,6 +700,11 @@ def ndim(self): def item(self): """ Return the first element of the underlying data as a python scalar. + + Returns + ------- + scalar + The first element of %(klass)s. """ return self.values.item() @@ -1016,6 +1025,11 @@ def argmax(self, axis=None, skipna=True): Dummy argument for consistency with Series skipna : bool, default True + Returns + ------- + numpy.ndarray + Indices of the maximum values. + See Also -------- numpy.ndarray.argmax @@ -1116,6 +1130,10 @@ def __iter__(self): These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) + + Returns + ------- + iterator """ # We are explicity making element iterators. if is_datetimelike(self._values): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e539391ba011e..3f76063e45e54 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1388,6 +1388,10 @@ def to_gbq(self, destination_table, project_id=None, chunksize=None, or string contents. This is useful for remote server authentication (eg. Jupyter/IPython notebook on remote host). + Returns + ------- + None + See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. @@ -3663,6 +3667,11 @@ def lookup(self, row_labels, col_labels): col_labels : sequence The column labels to use for lookup + Returns + ------- + numpy.ndarray + Array of the values corresponding to each (row, col) pair. + Notes ----- Akin to:: @@ -6051,6 +6060,11 @@ def unstack(self, level=-1, fill_value=None): col_level : int or string, optional If columns are a MultiIndex then use this level to melt. + Returns + ------- + DataFrame + Unpivoted DataFrame. + See Also -------- %(other)s diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0f92ea800c3e7..70a448f11d829 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1518,6 +1518,11 @@ def bool(self): This must be a boolean scalar value, either True or False. Raise a ValueError if the PandasObject does not have exactly 1 element, or that element is not boolean + + Returns + ------- + boolean + Same single boolean value converted to bool type. """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): @@ -1845,7 +1850,13 @@ def __hash__(self): ' hashed'.format(self.__class__.__name__)) def __iter__(self): - """Iterate over info axis""" + """Iterate over info axis + + Returns + ------- + iterator + Info axis as iterator. + """ return iter(self._info_axis) # can we get a better explanation of this? 
@@ -1854,6 +1865,11 @@ def keys(self): This is index for Series, columns for DataFrame and major_axis for Panel. + + Returns + ------- + Index + Info axis. """ return self._info_axis @@ -1948,6 +1964,11 @@ def __array_wrap__(self, result, context=None): def to_dense(self): """ Return dense representation of NDFrame (as opposed to sparse). + + Returns + ------- + self: %(klass)s + Dense %(klass)s. """ # compat return self @@ -2240,6 +2261,12 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None, .. versionadded:: 0.23.0 + Returns + ------- + None or str + If path_or_buf is None, returns the resulting json format as a + string. Otherwise returns None. + See Also -------- read_json @@ -2364,6 +2391,10 @@ def to_hdf(self, path_or_buf, key, **kwargs): See the errors argument for :func:`open` for a full list of options. + Returns + ------- + None + See Also -------- DataFrame.read_hdf : Read from HDF file. @@ -2420,6 +2451,12 @@ def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs): (default is False) compress : type of compressor (zlib or blosc), default to None (no compression) + + Returns + ------- + None or str + If path_or_buf is None, returns the resulting msgpack format as a + string. Otherwise returns None. """ from pandas.io import packers @@ -2583,6 +2620,10 @@ def to_pickle(self, path, compression='infer', .. [1] https://docs.python.org/3/library/pickle.html .. versionadded:: 0.21.0 + Returns + ------- + None + See Also -------- read_pickle : Load pickled pandas object (or any object) from file. @@ -6171,6 +6212,11 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, def ffill(self, axis=None, inplace=False, limit=None, downcast=None): """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. + + Returns + ------- + %(klass)s + Object with missing values filled. """ return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit, downcast=downcast) @@ -6178,6 +6224,11 @@ def ffill(self, axis=None, inplace=False, limit=None, downcast=None): def bfill(self, axis=None, inplace=False, limit=None, downcast=None): """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. + + Returns + ------- + %(klass)s + Object with missing values filled. """ return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit, downcast=downcast) @@ -9270,6 +9321,8 @@ def tz_convert(self, tz, axis=0, level=None, copy=True): Returns ------- + %(klass)s + %(klass)s with time zone converted axis. Raises ------ diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 4edcdd2dc1060..cf7105a819e4e 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -780,6 +780,11 @@ def asfreq(self, fill_value=None): .. versionadded:: 0.20.0 + Returns + ------- + DataFrame or Series + Values at the specified freq. + See Also -------- Series.asfreq @@ -795,6 +800,11 @@ def std(self, ddof=1, *args, **kwargs): ---------- ddof : integer, default 1 Degrees of freedom. + + Returns + ------- + DataFrame or Series + Standard deviation of values within each group. """ nv.validate_resampler_func('std', args, kwargs) return self._downsample('std', ddof=ddof) @@ -807,12 +817,23 @@ def var(self, ddof=1, *args, **kwargs): ---------- ddof : integer, default 1 degrees of freedom + + Returns + ------- + DataFrame or Series + Variance of values within each group. 
""" nv.validate_resampler_func('var', args, kwargs) return self._downsample('var', ddof=ddof) @Appender(GroupBy.size.__doc__) def size(self): + """ + Returns + ------- + Series + Number of rows in each group. + """ # It's a special case as higher level does return # a copy of 0-len objects. GH14962 result = self._downsample('size') @@ -830,6 +851,11 @@ def quantile(self, q=0.5, **kwargs): ---------- q : float or array-like, default 0.5 (50% quantile) + Returns + ------- + DataFrame or Series + Quantile of values within each group. + See Also -------- Series.quantile diff --git a/pandas/core/series.py b/pandas/core/series.py index 5d8de03f91e10..39c4e5bc24cbf 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -319,6 +319,11 @@ def from_array(cls, arr, index=None, name=None, dtype=None, copy=False, .. deprecated :: 0.23.0 Use pd.Series(..) constructor instead. + + Returns + ------- + Series + Constructed Series. """ warnings.warn("'from_array' is deprecated and will be removed in a " "future version. Please use the pd.Series(..) " @@ -486,6 +491,11 @@ def _formatting_values(self): def get_values(self): """ Same as values (but handles sparseness conversions); is a view. + + Returns + ------- + numpy.ndarray + Data of the Series. """ return self._data.get_values() @@ -526,6 +536,11 @@ def compress(self, condition, *args, **kwargs): .. deprecated:: 0.24.0 + Returns + ------- + Series + Series without the slices for which condition is false. + See Also -------- numpy.ndarray.compress @@ -550,6 +565,11 @@ def nonzero(self): but it will always be a one-item tuple because series only have one dimension. + Returns + ------- + numpy.ndarray + Indices of elements that are non-zero. + See Also -------- numpy.nonzero @@ -1479,6 +1499,11 @@ def iteritems(self): def keys(self): """ Return alias for index. + + Returns + ------- + Index + Index of the Series. """ return self.index @@ -3943,6 +3968,11 @@ def reindex_axis(self, labels, axis=0, **kwargs): .. deprecated:: 0.21.0 Use ``Series.reindex`` instead. + + Returns + ------- + Series + Reindexed Series. """ # for compatibility with higher dims if axis != 0: @@ -4406,6 +4436,11 @@ def valid(self, inplace=False, **kwargs): .. deprecated:: 0.23.0 Use :meth:`Series.dropna` instead. + + Returns + ------- + Series + Series without null values. """ warnings.warn("Method .valid will be removed in a future version. " "Use .dropna instead.", FutureWarning, stacklevel=2) From 4ef2b934cc5a7f473f1d6ab70fd6e515bb4e8a54 Mon Sep 17 00:00:00 2001 From: ihsan Date: Sat, 27 Apr 2019 23:24:06 +0300 Subject: [PATCH 02/19] DOC: Fix validation error RT01 in pandas/tseries (#25356) --- pandas/tseries/offsets.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 96de63f63aac5..d6fb7bf8cb700 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -334,6 +334,11 @@ def name(self): def rollback(self, dt): """ Roll provided date backward to next offset only if not on offset. + + Returns + ------- + dt: TimeStamp + Rolled timestamp if not on offset, otherwise unchanged timestamp. """ dt = as_timestamp(dt) if not self.onOffset(dt): @@ -343,6 +348,11 @@ def rollback(self, dt): def rollforward(self, dt): """ Roll provided date forward to next offset only if not on offset. + + Returns + ------- + dt: TimeStamp + Rolled timestamp if not on offset, otherwise unchanged timestamp. 
""" dt = as_timestamp(dt) if not self.onOffset(dt): From 648d1568bdd179c3f997bd0b8498e8b3a450b9c6 Mon Sep 17 00:00:00 2001 From: ihsan Date: Sat, 27 Apr 2019 23:44:49 +0300 Subject: [PATCH 03/19] DOC: Fix validation error RT01 in pandas/core/arrays (#25356) --- pandas/core/arrays/categorical.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 9c2aa03102533..44d9386f7f1f2 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -619,6 +619,10 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None): When `dtype` is provided, neither `categories` nor `ordered` should be provided. + Returns + ------- + Categorical + Examples -------- >>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True) From 9498481775f384f40fd38a836423211b1dfeb605 Mon Sep 17 00:00:00 2001 From: ihsan Date: Sat, 27 Apr 2019 23:45:33 +0300 Subject: [PATCH 04/19] DOC: Fix validation error RT01 in pandas/core/dtypes (#25356) --- pandas/core/dtypes/dtypes.py | 5 +++++ pandas/core/dtypes/inference.py | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 417683ad54420..a983ab055cf8c 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -27,6 +27,11 @@ def register_extension_dtype(cls): This enables operations like ``.astype(name)`` for the name of the ExtensionDtype. + Returns + ------- + callable + A class decorator. + Examples -------- >>> from pandas.api.extensions import register_extension_dtype diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index 0dc6ddbde9a77..6630a3eda0173 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -436,6 +436,10 @@ def is_hashable(obj): Distinguish between these and other types by trying the call to hash() and seeing if they raise TypeError. + Returns + ------- + bool + Examples -------- >>> a = ([],) From 7967414fe5ec24a73f331f3477d72211dd2c42c3 Mon Sep 17 00:00:00 2001 From: ihsan Date: Sun, 28 Apr 2019 00:16:04 +0300 Subject: [PATCH 05/19] DOC: Fix validation error RT01 in pandas/plotting (#25356) --- pandas/plotting/_core.py | 5 +++++ pandas/plotting/_misc.py | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 92f2f2a69c665..5835579b38156 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2477,6 +2477,11 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, `**kwds` : keywords To be passed to the actual plotting function + Returns + ------- + axes: matplotlib.AxesSubplot + A histogram plot. + See Also -------- matplotlib.axes.Axes.hist : Plot a histogram using matplotlib. diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index cefc4d8aca4f2..b2e7e7c223ac7 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -44,6 +44,11 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False, kwds : other plotting keyword arguments To be passed to scatter function + Returns + ------- + axes: numpy.ndarray + A matrix of scatter plots. 
+ Examples -------- >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D']) From ac4e339a8a01dfc6b031ec6abc6f5351dc664806 Mon Sep 17 00:00:00 2001 From: ihsan Date: Sun, 28 Apr 2019 00:16:33 +0300 Subject: [PATCH 06/19] DOC: Fix validation error RT01 in pandas/util (#25356) --- pandas/util/testing.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 9af5410e0738b..1ecf18ce5a114 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1007,6 +1007,10 @@ def assert_series_equal(left, right, check_dtype=True, obj : str, default 'Series' Specify object name being compared, internally used to show appropriate assertion message. + + Returns + ------- + None """ __tracebackhide__ = True From e1ae56ca2f624528a27f94218a11498cde43936b Mon Sep 17 00:00:00 2001 From: ihsan Date: Sun, 28 Apr 2019 00:26:30 +0300 Subject: [PATCH 07/19] DOC: Fix validation error RT01 in pandas/io (#25356) --- pandas/io/pytables.py | 12 ++++++++++++ pandas/io/stata.py | 8 ++++++++ 2 files changed, 20 insertions(+) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 19068eca38775..58474979e2561 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -535,6 +535,10 @@ def keys(self): Return a (potentially unordered) list of the keys corresponding to the objects stored in the HDFStore. These are ABSOLUTE path-names (e.g. have the leading '/' + + Returns + ------- + list """ return [n._v_pathname for n in self.groups()] @@ -1080,6 +1084,10 @@ def create_table_index(self, key, **kwargs): def groups(self): """return a list of all the top-level nodes (that are not themselves a pandas storage object) + + Returns + ------- + list """ _tables() self._check_if_open() @@ -1214,6 +1222,10 @@ def info(self): Print detailed information on the store. .. versionadded:: 0.21.0 + + Returns + ------- + str """ output = '{type}\nFile path: {path}\n'.format( type=type(self), path=pprint_thing(self._path)) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 550a6ca3cdc9f..b1e439eb88636 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1750,6 +1750,10 @@ def variable_labels(self): """ Return variable labels as a dict, associating each variable name with corresponding label. + + Returns + ------- + dict """ return dict(zip(self.varlist, self._variable_labels)) @@ -1757,6 +1761,10 @@ def value_labels(self): """ Return a dict, associating each variable name a dict, associating each value its corresponding label. 
+ + Returns + ------- + dict """ if not self._value_labels_read: self._read_value_labels() From dd105a671985f649ff4d5f9d32de5948491c2b06 Mon Sep 17 00:00:00 2001 From: ihsan Date: Sun, 28 Apr 2019 19:23:15 +0300 Subject: [PATCH 08/19] DOC: Fix remaining validation errors RT01 (#25356) --- pandas/core/accessor.py | 5 ++ pandas/core/arrays/categorical.py | 10 +++ pandas/core/arrays/interval.py | 8 +++ pandas/core/groupby/generic.py | 23 ++++++- pandas/core/groupby/groupby.py | 111 +++++++++++++++++++++++++++++- pandas/core/indexes/base.py | 54 +++++++++++++++ pandas/core/indexes/category.py | 6 ++ pandas/core/indexes/datetimes.py | 4 ++ pandas/core/indexes/multi.py | 8 +++ pandas/core/indexes/range.py | 8 ++- pandas/core/ops.py | 11 ++- pandas/core/resample.py | 6 -- pandas/io/excel/_base.py | 5 ++ 13 files changed, 248 insertions(+), 11 deletions(-) diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index aeebe686c63cb..f3af3512dec6a 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -206,6 +206,11 @@ def decorator(accessor): Name under which the accessor should be registered. A warning is issued if this name conflicts with a preexisting attribute. +Returns +------- +callable + A class decorator. + See Also -------- %(others)s diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 44d9386f7f1f2..5257e7ff9d5c1 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -760,6 +760,11 @@ def as_ordered(self, inplace=False): inplace : bool, default False Whether or not to set the ordered attribute in-place or return a copy of this categorical with ordered set to True. + + Returns + ------- + Categorical + Ordered Categorical. """ inplace = validate_bool_kwarg(inplace, 'inplace') return self.set_ordered(True, inplace=inplace) @@ -768,6 +773,11 @@ def as_unordered(self, inplace=False): """ Set the Categorical to be unordered. + Returns + ------- + Categorical + Unordered Categorical. + Parameters ---------- inplace : bool, default False diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 5de265eb83561..125a6e870500a 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -239,6 +239,10 @@ def _from_factorized(cls, values, original): .. versionadded:: 0.23.0 + Returns + ------- + %(klass)s + See Also -------- interval_range : Function to create a fixed frequency IntervalIndex. @@ -383,6 +387,10 @@ def from_arrays(cls, left, right, closed='right', copy=False, dtype=None): ..versionadded:: 0.23.0 + Returns + ------- + %(klass)s + See Also -------- interval_range : Function to create a fixed frequency IntervalIndex. diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index f8b9ddce6000e..c5ac58f0674ac 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -999,6 +999,11 @@ def true_and_notna(x, *args, **kwargs): def nunique(self, dropna=True): """ Return number of unique elements in the group. + + Returns + ------- + Series + Numver of unique values within each group. """ ids, _, _ = self.grouper.group_info @@ -1181,7 +1186,14 @@ def value_counts(self, normalize=False, sort=True, ascending=False, return Series(out, index=mi, name=self._selection_name) def count(self): - """ Compute count of group, excluding missing values """ + """ + Compute count of group, excluding missing values. + + Returns + ------- + Series + Count of values within each group. 
+ """ ids, _, ngroups = self.grouper.group_info val = self.obj.get_values() @@ -1479,7 +1491,14 @@ def _fill(self, direction, limit=None): return concat((self._wrap_transformed_output(output), res), axis=1) def count(self): - """ Compute count of group, excluding missing values """ + """ + Compute count of group, excluding missing values + + Returns + ------- + DataFrame + Count of values within each group. + """ from pandas.core.dtypes.missing import _isna_ndarraylike as _isna data, _ = self._get_data_to_aggregate() diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index bd8a8852964e3..2e9c7c1c14285 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1071,6 +1071,10 @@ def any(self, skipna=True): ---------- skipna : bool, default True Flag to ignore nan values during truth testing + + Returns + ------- + boolean """ return self._bool_agg('any', skipna) @@ -1084,6 +1088,10 @@ def all(self, skipna=True): ---------- skipna : bool, default True Flag to ignore nan values during truth testing + + Returns + ------- + boolean """ return self._bool_agg('all', skipna) @@ -1092,6 +1100,11 @@ def all(self, skipna=True): def count(self): """ Compute count of group, excluding missing values. + + Returns + ------- + Series or DataFrame + Count of values within each group. """ # defined here for API doc @@ -1161,6 +1174,11 @@ def median(self, **kwargs): Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex + + Returns + ------- + Series or DataFrame + Median of values within each group. """ try: return self._cython_agg_general('median', **kwargs) @@ -1187,6 +1205,11 @@ def std(self, ddof=1, *args, **kwargs): ---------- ddof : integer, default 1 degrees of freedom + + Returns + ------- + Series or DataFrame + Standard deviation of values within each group. """ # TODO: implement at Cython level? @@ -1205,6 +1228,11 @@ def var(self, ddof=1, *args, **kwargs): ---------- ddof : integer, default 1 degrees of freedom + + Returns + ------- + Series or DataFrame + Variance of values within each group. """ nv.validate_groupby_func('var', args, kwargs) if ddof == 1: @@ -1231,6 +1259,11 @@ def sem(self, ddof=1): ---------- ddof : integer, default 1 degrees of freedom + + Returns + ------- + Series or DataFrame + Standard error of the mean of values within each group. """ return self.std(ddof=ddof) / np.sqrt(self.count()) @@ -1240,6 +1273,11 @@ def sem(self, ddof=1): def size(self): """ Compute group sizes. + + Returns + ------- + Series + Number of rows in each group. """ result = self.grouper.size() @@ -1257,7 +1295,14 @@ def groupby_function(name, alias, npfunc, numeric_only=True, _convert=False, min_count=-1): - _local_template = "Compute %(f)s of group values" + _local_template = """ + Compute %(f)s of group values + + Returns + ------- + Series or DataFrame + %(f)s of values within each group. + """ @Substitution(name='groupby', f=name) @Appender(_common_see_also) @@ -1330,6 +1375,11 @@ def ohlc(self): Compute sum of values, excluding missing values. For multiple groupings, the result index will be a MultiIndex + + Returns + ------- + DataFrame + Open, high, low and close values within each group. """ return self._apply_to_column_groupbys( @@ -1514,6 +1564,11 @@ def pad(self, limit=None): limit : integer, optional limit of how many values to fill + Returns + ------- + Series or DataFrame + Object with missing values filled. 
+ See Also -------- Series.pad @@ -1534,6 +1589,11 @@ def backfill(self, limit=None): limit : integer, optional limit of how many values to fill + Returns + ------- + Series or DataFrame + Object with missing values filled. + See Also -------- Series.backfill @@ -1563,6 +1623,11 @@ def nth(self, n, dropna=None): dropna : None or str, optional apply the specified dropna operation before counting which row is the nth row. Needs to be None, 'any' or 'all' + + Returns + ------- + Series or DataFrame + Nth value within each group. %(see_also)s Examples -------- @@ -1793,6 +1858,11 @@ def ngroup(self, ascending=True): ascending : bool, default True If False, number in reverse, from number of group - 1 to 0. + Returns + ------- + Series + Unique numbers for each group. + See Also -------- .cumcount : Number the rows in each group. @@ -1856,6 +1926,11 @@ def cumcount(self, ascending=True): ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. + Returns + ------- + Series + Sequence number of each element within each group. + See Also -------- .ngroup : Number the groups themselves. @@ -1938,6 +2013,10 @@ def rank(self, method='average', ascending=True, na_option='keep', def cumprod(self, axis=0, *args, **kwargs): """ Cumulative product for each group. + + Returns + ------- + Series or DataFrame """ nv.validate_groupby_func('cumprod', args, kwargs, ['numeric_only', 'skipna']) @@ -1951,6 +2030,10 @@ def cumprod(self, axis=0, *args, **kwargs): def cumsum(self, axis=0, *args, **kwargs): """ Cumulative sum for each group. + + Returns + ------- + Series or DataFrame """ nv.validate_groupby_func('cumsum', args, kwargs, ['numeric_only', 'skipna']) @@ -1964,6 +2047,10 @@ def cumsum(self, axis=0, *args, **kwargs): def cummin(self, axis=0, **kwargs): """ Cumulative min for each group. + + Returns + ------- + Series or DataFrame """ if axis != 0: return self.apply(lambda x: np.minimum.accumulate(x, axis)) @@ -1975,6 +2062,10 @@ def cummin(self, axis=0, **kwargs): def cummax(self, axis=0, **kwargs): """ Cumulative max for each group. + + Returns + ------- + Series or DataFrame """ if axis != 0: return self.apply(lambda x: np.maximum.accumulate(x, axis)) @@ -2102,6 +2193,11 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None): fill_value : optional .. versionadded:: 0.24.0 + + Returns + ------- + Series or DataFrame + Object shifted within each group. """ if freq is not None or axis != 0 or not isna(fill_value): @@ -2120,6 +2216,11 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, axis=0): """ Calculate pct_change of each value to previous entry in group. + + Returns + ------- + Series or DataFrame + Percentage changes within each group. """ if freq is not None or axis != 0: return self.apply(lambda x: x.pct_change(periods=periods, @@ -2140,6 +2241,10 @@ def head(self, n=5): Essentially equivalent to ``.apply(lambda x: x.head(n))``, except ignores as_index flag. + + Returns + ------- + Series or DataFrame %(see_also)s Examples -------- @@ -2167,6 +2272,10 @@ def tail(self, n=5): Essentially equivalent to ``.apply(lambda x: x.tail(n))``, except ignores as_index flag. + + Returns + ------- + Series or DataFrame %(see_also)s Examples -------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 6bb8f299e811f..2a0df0af9b407 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -692,6 +692,11 @@ def ravel(self, order='C'): """ Return an ndarray of the flattened values of the underlying data. 
+ Returns + ------- + result: numpy.ndarray + Flattened array. + See Also -------- numpy.ndarray.ravel @@ -728,6 +733,11 @@ def view(self, cls=None): or the original Index is returned. .. versionadded:: 0.19.0 + + Returns + ------- + Index + Index with values cast to specified dtype. """ @Appender(_index_shared_docs['astype']) @@ -777,6 +787,11 @@ def astype(self, dtype, copy=True): If allow_fill=True and fill_value is not None, indices specified by -1 is regarded as NA. If Index doesn't hold NA, raise ValueError + Returns + ------- + taken: numpy.ndarray + Elements of given indices. + See Also -------- numpy.ndarray.take @@ -1038,6 +1053,11 @@ def to_native_types(self, slicer=None, **kwargs): Whether or not there are quoted values in `self` 3) date_format : str The format used to represent date-like values + + Returns + ------- + numpy.ndarray + Formatted values. """ values = self @@ -3716,6 +3736,10 @@ def memory_usage(self, deep=False): ---------- cond : boolean array-like with the same length as self other : scalar, or array-like + + Returns + ------- + Index """ @Appender(_index_shared_docs['where']) @@ -4026,6 +4050,10 @@ def putmask(self, mask, value): """ Return a new Index of the values set with the mask. + Returns + ------- + Index + See Also -------- numpy.ndarray.putmask @@ -4044,6 +4072,11 @@ def putmask(self, mask, value): def equals(self, other): """ Determine if two Index objects contain the same elements. + + Returns + ------- + boolean + If two Index objects have equal elements True, otherwise False. """ if self.is_(other): return True @@ -4065,6 +4098,12 @@ def identical(self, other): """ Similar to equals, but check that other comparable attributes are also equal. + + Returns + ------- + boolean + If two Index objects have equal elements and same type True, + otherwise False. """ return (self.equals(other) and all((getattr(self, c, None) == getattr(other, c, None) @@ -4336,6 +4375,11 @@ def get_value(self, series, key): """ Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing. + + Returns + ------- + scalar + A value in the series with the indice of the key value in self. """ # if we have something that is Index-like, then @@ -4443,6 +4487,11 @@ def get_indexer_for(self, target, **kwargs): This dispatches to get_indexer or get_indexer_nonunique as appropriate. + + Returns + ------- + numpy.ndarray + List of indices. """ if self.is_unique: return self.get_indexer(target, **kwargs) @@ -4770,6 +4819,11 @@ def get_slice_bound(self, label, side, kind): Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. + Returns + ------- + int + Indice of label. + Parameters ---------- label : object diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 473686a7541a5..3617def970081 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -284,6 +284,12 @@ def _is_dtype_compat(self, other): def equals(self, other): """ Determine if two CategorialIndex objects contain the same elements. + + Returns + ------- + boolean + If two CategorialIndex objects have equal elements True, + otherwise False. 
""" if self.is_(other): return True diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 151d66223ce1c..65b91b7e66b2e 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -761,6 +761,10 @@ def to_series(self, keep_tz=None, index=None, name=None): def snap(self, freq='S'): """ Snap time stamps to nearest occurring frequency + + Returns + ------- + DatetimeIndex """ # Superdumb, punting on any optimizing freq = to_offset(freq) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index d15c931190f30..f919212dad6e7 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1562,6 +1562,10 @@ def is_all_dates(self): def is_lexsorted(self): """ Return True if the codes are lexicographically sorted + + Returns + ------- + boolean """ return self.lexsort_depth == self.nlevels @@ -2004,6 +2008,10 @@ def reorder_levels(self, order): Parameters ---------- + + Returns + ------- + MultiIndex """ order = [self._get_level_number(i) for i in order] if len(order) != self.nlevels: diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 9b0c611651b94..4909085433a5a 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -126,7 +126,13 @@ def ensure_int(value, field): @classmethod def from_range(cls, data, name=None, dtype=None, **kwargs): - """ Create RangeIndex from a range object. """ + """ + Create RangeIndex from a range object. + + Returns + ------- + RangeIndex + """ if not isinstance(data, range): raise TypeError( '{0}(...) must be called with object coercible to a ' diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 8c423e0cf304a..a314d3b2a2348 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1025,6 +1025,15 @@ def _get_op_name(op, special): Panel """ +_comp_doc_PANEL = """ +Wrapper for comparison method {name} + +Returns +------- +Panel of bool + Result of the comparison. +""" + def _make_flex_doc(op_name, typ): """ @@ -2327,7 +2336,7 @@ def na_op(x, y): result = mask_cmp_op(x, y, op, np.ndarray) return result - @Appender('Wrapper for comparison method {name}'.format(name=op_name)) + @Appender(_comp_doc_PANEL.format(name=op_name)) def f(self, other, axis=None): # Validate the axis parameter if axis is not None: diff --git a/pandas/core/resample.py b/pandas/core/resample.py index cf7105a819e4e..b2d30b5f34a75 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -828,12 +828,6 @@ def var(self, ddof=1, *args, **kwargs): @Appender(GroupBy.size.__doc__) def size(self): - """ - Returns - ------- - Series - Number of rows in each group. - """ # It's a special case as higher level does return # a copy of 0-len objects. GH14962 result = self._downsample('size') diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index c5f6019d7aeb5..3e9ccb00b35c9 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -794,6 +794,11 @@ def parse(self, Equivalent to read_excel(ExcelFile, ...) See the read_excel docstring for more info on accepted parameters + + Returns + ------- + DataFrame or dict of DataFrames + DataFrame from the passed in Excel file. 
""" # Can't use _deprecate_kwarg since sheetname=None has a special meaning From a10746822f6ef354a19702bb02a202db8d87cae6 Mon Sep 17 00:00:00 2001 From: ihsan Date: Sun, 28 Apr 2019 22:38:50 +0300 Subject: [PATCH 09/19] DOC: Fix validation error RT01 for itertuples function (#25356) Change yield to return --- pandas/core/frame.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3f76063e45e54..d73b805ae32a3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -839,12 +839,12 @@ def itertuples(self, index=True, name="Pandas"): The name of the returned namedtuples or None to return regular tuples. - Yields + Returns ------- - collections.namedtuple - Yields a namedtuple for each row in the DataFrame with the first - field possibly being the index and following fields being the - column values. + iterator + An object to iterate over namedtuples for each row in the + DataFrame with the first field possibly being the index and + following fields being the column values. See Also -------- From c7124e7719f76c06debf53ee03590c2b1c8973eb Mon Sep 17 00:00:00 2001 From: ihsan Date: Sun, 28 Apr 2019 22:41:57 +0300 Subject: [PATCH 10/19] DOC: Update the code_check.sh script to take into account the RT01 type of errors (#25356) --- ci/code_checks.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 9a95b7887cfab..01e4e2ff9243d 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -248,8 +248,8 @@ fi ### DOCSTRINGS ### if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then - MSG='Validate docstrings (GL03, GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT04, RT05, SA05)' ; echo $MSG - $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT04,RT05,SA05 + MSG='Validate docstrings (GL03, GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA05)' ; echo $MSG + $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA05 RET=$(($RET + $?)) ; echo $MSG "DONE" fi From 26a3dc5bd3f5ea0fa5a594597dc4f3e8b2b85894 Mon Sep 17 00:00:00 2001 From: ihsan Date: Sun, 28 Apr 2019 23:06:25 +0300 Subject: [PATCH 11/19] DOC: Remove repeating return description (#25356) --- pandas/core/frame.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d73b805ae32a3..b606b3f9327a3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3670,7 +3670,6 @@ def lookup(self, row_labels, col_labels): Returns ------- numpy.ndarray - Array of the values corresponding to each (row, col) pair. 
Notes ----- From 5920ad2a5466adae921cd427cd28373d739df041 Mon Sep 17 00:00:00 2001 From: ihsan Date: Sun, 28 Apr 2019 23:30:13 +0300 Subject: [PATCH 12/19] DOC: Remove whitespaces (#25356) --- pandas/core/generic.py | 2 +- pandas/core/groupby/generic.py | 4 ++-- pandas/core/groupby/groupby.py | 12 ++++++------ pandas/core/indexes/base.py | 2 +- pandas/core/indexes/range.py | 2 +- pandas/io/excel/_base.py | 2 +- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 70a448f11d829..94df8906aa215 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1851,7 +1851,7 @@ def __hash__(self): def __iter__(self): """Iterate over info axis - + Returns ------- iterator diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index c5ac58f0674ac..6f2353a3ed2c3 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1188,7 +1188,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, def count(self): """ Compute count of group, excluding missing values. - + Returns ------- Series @@ -1493,7 +1493,7 @@ def _fill(self, direction, limit=None): def count(self): """ Compute count of group, excluding missing values - + Returns ------- DataFrame diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 2e9c7c1c14285..b102179b630be 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1205,7 +1205,7 @@ def std(self, ddof=1, *args, **kwargs): ---------- ddof : integer, default 1 degrees of freedom - + Returns ------- Series or DataFrame @@ -1297,7 +1297,7 @@ def groupby_function(name, alias, npfunc, _local_template = """ Compute %(f)s of group values - + Returns ------- Series or DataFrame @@ -2195,7 +2195,7 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None): .. versionadded:: 0.24.0 Returns - ------- + ------- Series or DataFrame Object shifted within each group. """ @@ -2218,7 +2218,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, Calculate pct_change of each value to previous entry in group. Returns - ------- + ------- Series or DataFrame Percentage changes within each group. """ @@ -2243,7 +2243,7 @@ def head(self, n=5): except ignores as_index flag. Returns - ------- + ------- Series or DataFrame %(see_also)s Examples @@ -2274,7 +2274,7 @@ def tail(self, n=5): except ignores as_index flag. Returns - ------- + ------- Series or DataFrame %(see_also)s Examples diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2a0df0af9b407..226e16f27ba27 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4379,7 +4379,7 @@ def get_value(self, series, key): Returns ------- scalar - A value in the series with the indice of the key value in self. + A value in the series with the indice of the key value in self. """ # if we have something that is Index-like, then diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 4909085433a5a..b191a37831efe 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -128,7 +128,7 @@ def ensure_int(value, field): def from_range(cls, data, name=None, dtype=None, **kwargs): """ Create RangeIndex from a range object. 
- + Returns ------- RangeIndex diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 3e9ccb00b35c9..e4f2254b47c77 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -798,7 +798,7 @@ def parse(self, Returns ------- DataFrame or dict of DataFrames - DataFrame from the passed in Excel file. + DataFrame from the passed in Excel file. """ # Can't use _deprecate_kwarg since sheetname=None has a special meaning From 484b69bd575db59500705be96c91d23ac2ace927 Mon Sep 17 00:00:00 2001 From: ihsan Date: Mon, 29 Apr 2019 00:48:57 +0300 Subject: [PATCH 13/19] DOC: Fix introduced docstring validation errors (#25356) --- pandas/core/arrays/categorical.py | 10 +++++----- pandas/core/generic.py | 2 +- pandas/core/groupby/groupby.py | 2 +- pandas/core/indexes/base.py | 10 +++++----- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 5257e7ff9d5c1..f050204292664 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -773,16 +773,16 @@ def as_unordered(self, inplace=False): """ Set the Categorical to be unordered. - Returns - ------- - Categorical - Unordered Categorical. - Parameters ---------- inplace : bool, default False Whether or not to set the ordered attribute in-place or return a copy of this categorical with ordered set to False. + + Returns + ------- + Categorical + Unordered Categorical. """ inplace = validate_bool_kwarg(inplace, 'inplace') return self.set_ordered(False, inplace=inplace) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 94df8906aa215..f5a319e66bbbc 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9322,7 +9322,7 @@ def tz_convert(self, tz, axis=0, level=None, copy=True): Returns ------- %(klass)s - %(klass)s with time zone converted axis. + Object with time zone converted axis. Raises ------ diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index b102179b630be..5569216c8cd7f 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1301,7 +1301,7 @@ def groupby_function(name, alias, npfunc, Returns ------- Series or DataFrame - %(f)s of values within each group. + Computed %(f)s of values within each group. """ @Substitution(name='groupby', f=name) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 226e16f27ba27..bd1d5ee3f65f4 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4819,16 +4819,16 @@ def get_slice_bound(self, label, side, kind): Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. - Returns - ------- - int - Indice of label. - Parameters ---------- label : object side : {'left', 'right'} kind : {'ix', 'loc', 'getitem'} + + Returns + ------- + int + Indice of label. 
""" assert kind in ['ix', 'loc', 'getitem', None] From dc209bed3827b32bae33671314cd57429f2c5cfa Mon Sep 17 00:00:00 2001 From: ihsan Date: Mon, 29 Apr 2019 09:26:50 +0300 Subject: [PATCH 14/19] DOC: Add period to the end of first line where missing (#26234) --- pandas/core/generic.py | 2 +- pandas/core/groupby/generic.py | 2 +- pandas/core/groupby/groupby.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f5a319e66bbbc..bd58f9ac9702e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1850,7 +1850,7 @@ def __hash__(self): ' hashed'.format(self.__class__.__name__)) def __iter__(self): - """Iterate over info axis + """Iterate over info axis. Returns ------- diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 6f2353a3ed2c3..f3ee4f8a6ccd0 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1492,7 +1492,7 @@ def _fill(self, direction, limit=None): def count(self): """ - Compute count of group, excluding missing values + Compute count of group, excluding missing values. Returns ------- diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 5569216c8cd7f..a6fe06913be9e 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1296,7 +1296,7 @@ def groupby_function(name, alias, npfunc, min_count=-1): _local_template = """ - Compute %(f)s of group values + Compute %(f)s of group values. Returns ------- From a011fa439ba56b6874b551466bcc80c62f8d31aa Mon Sep 17 00:00:00 2001 From: ihsan Date: Mon, 29 Apr 2019 10:22:31 +0300 Subject: [PATCH 15/19] DOC: Change boolean to bool (#26234) --- pandas/core/generic.py | 2 +- pandas/core/groupby/groupby.py | 4 ++-- pandas/core/indexes/base.py | 4 ++-- pandas/core/indexes/category.py | 2 +- pandas/core/indexes/multi.py | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bd58f9ac9702e..9322ef5b4754b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1521,7 +1521,7 @@ def bool(self): Returns ------- - boolean + bool Same single boolean value converted to bool type. """ v = self.squeeze() diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index a6fe06913be9e..910a02cab6881 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1074,7 +1074,7 @@ def any(self, skipna=True): Returns ------- - boolean + bool """ return self._bool_agg('any', skipna) @@ -1091,7 +1091,7 @@ def all(self, skipna=True): Returns ------- - boolean + bool """ return self._bool_agg('all', skipna) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index bd1d5ee3f65f4..092c99f68c177 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4075,7 +4075,7 @@ def equals(self, other): Returns ------- - boolean + bool If two Index objects have equal elements True, otherwise False. """ if self.is_(other): @@ -4101,7 +4101,7 @@ def identical(self, other): Returns ------- - boolean + bool If two Index objects have equal elements and same type True, otherwise False. """ diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 3617def970081..9bb1ec0e960f4 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -287,7 +287,7 @@ def equals(self, other): Returns ------- - boolean + bool If two CategorialIndex objects have equal elements True, otherwise False. 
""" diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index f919212dad6e7..7074e2e2122f8 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1565,7 +1565,7 @@ def is_lexsorted(self): Returns ------- - boolean + bool """ return self.lexsort_depth == self.nlevels From 85a82c48dad48c4a85a3ebecb1521f82a5eaa08f Mon Sep 17 00:00:00 2001 From: ihsan Date: Mon, 29 Apr 2019 13:58:41 +0300 Subject: [PATCH 16/19] DOC: Remove returned variable names from docstring (#26234) --- pandas/core/base.py | 2 +- pandas/core/generic.py | 2 +- pandas/core/indexes/base.py | 4 ++-- pandas/plotting/_core.py | 2 +- pandas/plotting/_misc.py | 2 +- pandas/tseries/offsets.py | 4 ++-- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pandas/core/base.py b/pandas/core/base.py index 6002f695a5c25..3dd51b55379e8 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -659,7 +659,7 @@ def transpose(self, *args, **kwargs): Returns ------- - self: %(klass)s + %(klass)s """ nv.validate_transpose(args, kwargs) return self diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9322ef5b4754b..46e1f6b89f154 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1967,7 +1967,7 @@ def to_dense(self): Returns ------- - self: %(klass)s + %(klass)s Dense %(klass)s. """ # compat diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 092c99f68c177..91f3295a84740 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -694,7 +694,7 @@ def ravel(self, order='C'): Returns ------- - result: numpy.ndarray + numpy.ndarray Flattened array. See Also @@ -789,7 +789,7 @@ def astype(self, dtype, copy=True): Returns ------- - taken: numpy.ndarray + numpy.ndarray Elements of given indices. See Also diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 5835579b38156..846c1a0052ff2 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2479,7 +2479,7 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, Returns ------- - axes: matplotlib.AxesSubplot + matplotlib.AxesSubplot A histogram plot. See Also diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index b2e7e7c223ac7..b361ccec0e867 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -46,7 +46,7 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False, Returns ------- - axes: numpy.ndarray + numpy.ndarray A matrix of scatter plots. Examples diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index d6fb7bf8cb700..d7a528497436e 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -337,7 +337,7 @@ def rollback(self, dt): Returns ------- - dt: TimeStamp + TimeStamp Rolled timestamp if not on offset, otherwise unchanged timestamp. """ dt = as_timestamp(dt) @@ -351,7 +351,7 @@ def rollforward(self, dt): Returns ------- - dt: TimeStamp + TimeStamp Rolled timestamp if not on offset, otherwise unchanged timestamp. 
""" dt = as_timestamp(dt) From bd21eea7ea2a8bcdef7c382f77f78d3db2dba898 Mon Sep 17 00:00:00 2001 From: Marc Garcia Date: Wed, 1 May 2019 20:49:33 +0300 Subject: [PATCH 17/19] DOC: Fix typo #26234 Co-Authored-By: ihsansecer --- pandas/core/groupby/generic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index f3ee4f8a6ccd0..21ce0fe1cea08 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1003,7 +1003,7 @@ def nunique(self, dropna=True): Returns ------- Series - Numver of unique values within each group. + Number of unique values within each group. """ ids, _, _ = self.grouper.group_info From d5f80f7a7be4ff102418f8fecf7e621e472fa45f Mon Sep 17 00:00:00 2001 From: ihsan Date: Wed, 1 May 2019 21:12:12 +0300 Subject: [PATCH 18/19] DOC: Make minor style changes (#26234) --- pandas/core/generic.py | 3 ++- pandas/core/groupby/groupby.py | 2 +- pandas/core/indexes/base.py | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 46e1f6b89f154..9e95c53235f16 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1850,7 +1850,8 @@ def __hash__(self): ' hashed'.format(self.__class__.__name__)) def __iter__(self): - """Iterate over info axis. + """ + Iterate over info axis. Returns ------- diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 910a02cab6881..d6ea8f3facefb 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1627,7 +1627,7 @@ def nth(self, n, dropna=None): Returns ------- Series or DataFrame - Nth value within each group. + N-th value within each group. %(see_also)s Examples -------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 91f3295a84740..8f0bf668e59ac 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4379,7 +4379,7 @@ def get_value(self, series, key): Returns ------- scalar - A value in the series with the indice of the key value in self. + A value in the Series with the index of the key value in self. """ # if we have something that is Index-like, then @@ -4828,7 +4828,7 @@ def get_slice_bound(self, label, side, kind): Returns ------- int - Indice of label. + Index of label. """ assert kind in ['ix', 'loc', 'getitem', None] From d32306ce754ed16e92d3e50f2086de8128d257d4 Mon Sep 17 00:00:00 2001 From: ihsan Date: Tue, 7 May 2019 08:47:19 +0300 Subject: [PATCH 19/19] DOC: Remove return section from None returning functions (#26234) --- pandas/core/frame.py | 4 ---- pandas/core/generic.py | 8 -------- pandas/util/testing.py | 4 ---- 3 files changed, 16 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b606b3f9327a3..674b1474158f6 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1388,10 +1388,6 @@ def to_gbq(self, destination_table, project_id=None, chunksize=None, or string contents. This is useful for remote server authentication (eg. Jupyter/IPython notebook on remote host). - Returns - ------- - None - See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9e95c53235f16..e0a0505e47c67 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2392,10 +2392,6 @@ def to_hdf(self, path_or_buf, key, **kwargs): See the errors argument for :func:`open` for a full list of options. 
- Returns - ------- - None - See Also -------- DataFrame.read_hdf : Read from HDF file. @@ -2621,10 +2617,6 @@ def to_pickle(self, path, compression='infer', .. [1] https://docs.python.org/3/library/pickle.html .. versionadded:: 0.21.0 - Returns - ------- - None - See Also -------- read_pickle : Load pickled pandas object (or any object) from file. diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 1ecf18ce5a114..9af5410e0738b 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1007,10 +1007,6 @@ def assert_series_equal(left, right, check_dtype=True, obj : str, default 'Series' Specify object name being compared, internally used to show appropriate assertion message. - - Returns - ------- - None """ __tracebackhide__ = True
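
For reference, every hunk in this series adds the same numpydoc layout that the RT01 rule (a missing "Returns" section) enforces once the code is added to the validation list in ci/code_checks.sh (patch 10): a "Returns" header, a dashed underline, the return type on its own line, and an indented one-line description. The sketch below is not part of the patches; it is a minimal, self-contained illustration of that layout on a hypothetical function whose name, parameters and example data are invented for this note.

# Minimal sketch of the numpydoc "Returns" layout the RT01 check expects.
# The function below is hypothetical and exists only to show the docstring shape.

def nunique_per_group(values, groups):
    """
    Count distinct values within each group.

    Parameters
    ----------
    values : list
        Observed values, aligned element-wise with ``groups``.
    groups : list
        Group label for each entry in ``values``.

    Returns
    -------
    dict
        Mapping of group label to its number of unique values.
    """
    seen_per_group = {}
    for group, value in zip(groups, values):
        # Collect the distinct values observed for each group label.
        seen_per_group.setdefault(group, set()).add(value)
    return {group: len(seen) for group, seen in seen_per_group.items()}


if __name__ == "__main__":
    print(nunique_per_group([1, 1, 2, 3, 3], ["a", "a", "a", "b", "b"]))
    # {'a': 2, 'b': 1}

Per the ci/code_checks.sh hunk in patch 10, docstrings touched here are expected to pass scripts/validate_docstrings.py with RT01 included in the checked error codes; that script can reportedly also be pointed at a single object (for example, pandas.Series.keys) to validate one docstring at a time, though that invocation is an assumption based on the script's usual usage rather than anything shown in this series.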