From 96a87f33467f214a84df91e36fd3d8c3d6f7b1fb Mon Sep 17 00:00:00 2001
From: Derek Knowlton
Date: Thu, 5 Dec 2024 15:45:29 -0800
Subject: [PATCH 1/4] fixed resample.Resampler docstring errors in min, max,
 mean, prod, std, var as indicated by validate_docstrings.py

---
 ci/code_checks.sh       |   6 --
 pandas/core/resample.py | 183 ++++++++++++++++++++++++++++++----------
 2 files changed, 137 insertions(+), 52 deletions(-)

diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index adc5bc9a01bdd..1e81cb5b5f0b2 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -87,14 +87,8 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01" \
         -i "pandas.core.groupby.DataFrameGroupBy.plot PR02" \
         -i "pandas.core.groupby.SeriesGroupBy.plot PR02" \
-        -i "pandas.core.resample.Resampler.max PR01,RT03,SA01" \
-        -i "pandas.core.resample.Resampler.mean SA01" \
-        -i "pandas.core.resample.Resampler.min PR01,RT03,SA01" \
-        -i "pandas.core.resample.Resampler.prod SA01" \
         -i "pandas.core.resample.Resampler.quantile PR01,PR07" \
-        -i "pandas.core.resample.Resampler.std SA01" \
         -i "pandas.core.resample.Resampler.transform PR01,RT03,SA01" \
-        -i "pandas.core.resample.Resampler.var SA01" \
         -i "pandas.errors.NullFrequencyError SA01" \
         -i "pandas.errors.NumbaUtilError SA01" \
         -i "pandas.errors.PerformanceWarning SA01" \

diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index fdfb9f21bdb9f..d4af3753e9c41 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import copy
+import warnings
 from textwrap import dedent
 from typing import (
     TYPE_CHECKING,
@@ -10,10 +11,10 @@
     no_type_check,
     overload,
 )
-import warnings
 
 import numpy as np
 
+import pandas.core.algorithms as algos
 from pandas._libs import lib
 from pandas._libs.tslibs import (
     BaseOffset,
@@ -25,17 +26,12 @@
     to_offset,
 )
 from pandas._typing import NDFrameT
-from pandas.errors import AbstractMethodError
-from pandas.util._decorators import (
-    Appender,
-    Substitution,
-    doc,
-)
-from pandas.util._exceptions import (
-    find_stack_level,
-    rewrite_warning,
+from pandas.core.apply import ResamplerWindowApply
+from pandas.core.arrays import ArrowExtensionArray
+from pandas.core.base import (
+    PandasObject,
+    SelectionMixin,
 )
-
 from pandas.core.dtypes.dtypes import (
     ArrowDtype,
     PeriodDtype,
@@ -44,14 +40,6 @@
     ABCDataFrame,
     ABCSeries,
 )
-
-import pandas.core.algorithms as algos
-from pandas.core.apply import ResamplerWindowApply
-from pandas.core.arrays import ArrowExtensionArray
-from pandas.core.base import (
-    PandasObject,
-    SelectionMixin,
-)
 from pandas.core.generic import (
     NDFrame,
     _shared_docs,
@@ -80,7 +68,7 @@
     timedelta_range,
 )
 from pandas.core.reshape.concat import concat
-
+from pandas.errors import AbstractMethodError
 from pandas.tseries.frequencies import (
     is_subperiod,
     is_superperiod,
@@ -89,6 +77,15 @@
     Day,
     Tick,
 )
+from pandas.util._decorators import (
+    Appender,
+    Substitution,
+    doc,
+)
+from pandas.util._exceptions import (
+    find_stack_level,
+    rewrite_warning,
+)
 
 if TYPE_CHECKING:
     from collections.abc import (
@@ -96,6 +93,10 @@
         Hashable,
     )
 
+    from pandas import (
+        DataFrame,
+        Series,
+    )
     from pandas._typing import (
         Any,
         AnyArrayLike,
@@ -114,11 +115,6 @@
         npt,
     )
 
-    from pandas import (
-        DataFrame,
-        Series,
-    )
-
 
 _shared_docs_kwargs: dict[str, str] = {}
 
@@ -360,7 +356,8 @@ def pipe(
         axis="",
     )
     def aggregate(self, func=None, *args, **kwargs):
-        result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
+        result = ResamplerWindowApply(
+            self, func, args=args, kwargs=kwargs).agg()
         if result is None:
             how = func
             result = self._groupby_and_aggregate(how, *args, **kwargs)
@@ -447,13 +444,14 @@ def _groupby_and_aggregate(self, how, *args, **kwargs):
         # Excludes `on` column when provided
         obj = self._obj_with_exclusions
 
-        grouped = get_groupby(obj, by=None, grouper=grouper, group_keys=self.group_keys)
+        grouped = get_groupby(obj, by=None, grouper=grouper,
+                              group_keys=self.group_keys)
 
         try:
             if callable(how):
                 # TODO: test_resample_apply_with_additional_args fails if we go
                 # through the non-lambda path, not clear that it should.
-                func = lambda x: how(x, *args, **kwargs)
+                def func(x): return how(x, *args, **kwargs)
                 result = grouped.aggregate(func)
             else:
                 result = grouped.aggregate(how, *args, **kwargs)
@@ -1099,6 +1097,13 @@ def prod(
         Series or DataFrame
             Computed prod of values within each group.
 
+        See Also
+        --------
+        core.resample.Resampler.sum : Compute sum of groups, excluding missing values.
+        core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
+        core.resample.Resampler.median : Compute median of groups, excluding missing
+            values.
+
         Examples
         --------
         >>> ser = pd.Series(
@@ -1129,9 +1134,30 @@ def min(
         """
         Compute min value of group.
 
+        Parameters
+        ----------
+        numeric_only : bool, default False
+            Include only float, int, boolean columns.
+
+            .. versionchanged:: 2.0.0
+
+                numeric_only no longer accepts ``None``.
+
+        min_count : int, default 0
+            The required number of valid values to perform the operation. If fewer
+            than ``min_count`` non-NA values are present the result will be NA.
+
         Returns
         -------
         Series or DataFrame
+            Compute the minimum value in the given Series or DataFrame.
+
+        See Also
+        --------
+        core.resample.Resampler.max : Compute max value of group.
+        core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
+        core.resample.Resampler.median : Compute median of groups, excluding missing 
+            values.
 
         Examples
         --------
@@ -1163,9 +1189,30 @@ def max(
         """
         Compute max value of group.
 
+        Parameters
+        ----------
+        numeric_only : bool, default False
+            Include only float, int, boolean columns.
+
+            .. versionchanged:: 2.0.0
+
+                numeric_only no longer accepts ``None``.
+
+        min_count : int, default 0
+            The required number of valid values to perform the operation. If fewer
+            than ``min_count`` non-NA values are present the result will be NA.
+
         Returns
         -------
         Series or DataFrame
+            Compute the maximum value in the given Series or DataFrame.
+
+        See Also
+        --------
+        core.resample.Resampler.min : Compute min value of group.
+        core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
+        core.resample.Resampler.median : Compute median of groups, excluding missing
+            values.
 
         Examples
         --------
@@ -1239,6 +1286,16 @@ def mean(
         DataFrame or Series
             Mean of values within each group.
 
+        See Also
+        --------
+        core.resample.Resampler.median : Compute median of groups, excluding missing
+            values.
+        core.resample.Resampler.sum : Compute sum of groups, excluding missing values.
+        core.resample.Resampler.std : Compute standard deviation of groups, excluding
+            missing values.
+        core.resample.Resampler.var : Compute variance of groups, excluding missing
+            values.
+
         Examples
         --------
 
@@ -1288,6 +1345,14 @@ def std(
         DataFrame or Series
             Standard deviation of values within each group.
 
+        See Also
+        --------
+        core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
+        core.resample.Resampler.median : Compute median of groups, excluding missing
+            values.
+        core.resample.Resampler.var : Compute variance of groups, excluding missing
+            values.
+
         Examples
         --------
 
@@ -1339,6 +1404,14 @@ def var(
         DataFrame or Series
             Variance of values within each group.
 
+        See Also
+        --------
+        core.resample.Resampler.std : Compute standard deviation of groups, excluding
+            missing values.
+        core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
+        core.resample.Resampler.median : Compute median of groups, excluding missing
+            values.
+
         Examples
         --------
 
@@ -1574,14 +1647,16 @@ def _apply(self, f, *args, **kwargs):
         """
 
        def func(x):
-            x = self._resampler_cls(x, timegrouper=self._timegrouper, gpr_index=self.ax)
+            x = self._resampler_cls(
+                x, timegrouper=self._timegrouper, gpr_index=self.ax)
 
             if isinstance(f, str):
                 return getattr(x, f)(**kwargs)
 
             return x.apply(f, *args, **kwargs)
 
-        result = _apply(self._groupby, func, include_groups=self.include_groups)
+        result = _apply(self._groupby, func,
+                        include_groups=self.include_groups)
         return self._wrap_result(result)
 
     _upsample = _apply
@@ -1999,14 +2074,17 @@ def __init__(
         if closed not in {None, "left", "right"}:
             raise ValueError(f"Unsupported value {closed} for `closed`")
         if convention not in {None, "start", "end", "e", "s"}:
-            raise ValueError(f"Unsupported value {convention} for `convention`")
+            raise ValueError(
+                f"Unsupported value {convention} for `convention`")
 
         if (
-            (key is None and obj is not None and isinstance(obj.index, PeriodIndex))  # type: ignore[attr-defined]
+            (key is None and obj is not None and isinstance(
+                obj.index, PeriodIndex))  # type: ignore[attr-defined]
             or (
                 key is not None
                 and obj is not None
-                and getattr(obj[key], "dtype", None) == "period"  # type: ignore[index]
+                # type: ignore[index]
+                and getattr(obj[key], "dtype", None) == "period"
             )
         ):
             freq = to_offset(freq, is_period=True)
@@ -2226,7 +2304,8 @@ def _adjust_bin_edges(
             edges_dti = binner.tz_localize(None)
             edges_dti = (
                 edges_dti
-                + Timedelta(days=1, unit=edges_dti.unit).as_unit(edges_dti.unit)
+                + Timedelta(days=1,
+                            unit=edges_dti.unit).as_unit(edges_dti.unit)
                 - Timedelta(1, unit=edges_dti.unit).as_unit(edges_dti.unit)
             )
             bin_edges = edges_dti.tz_localize(binner.tz).asi8
@@ -2256,7 +2335,8 @@ def _get_time_delta_bins(self, ax: TimedeltaIndex):
             )
 
         if not len(ax):
-            binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name)
+            binner = labels = TimedeltaIndex(
+                data=[], freq=self.freq, name=ax.name)
             return binner, [], labels
 
         start, end = ax.min(), ax.max()
@@ -2295,7 +2375,8 @@ def _get_time_period_bins(self, ax: DatetimeIndex):
             )
             return binner, [], labels
 
-        labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name)
+        labels = binner = period_range(
+            start=ax[0], end=ax[-1], freq=freq, name=ax.name)
 
         end_stamps = (labels + freq).asfreq(freq, "s").to_timestamp()
         if ax.tz:
@@ -2324,10 +2405,12 @@ def _get_period_bins(self, ax: PeriodIndex):
         if not len(memb):
             # index contains no valid (non-NaT) values
             bins = np.array([], dtype=np.int64)
-            binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name)
+            binner = labels = PeriodIndex(
+                data=[], freq=self.freq, name=ax.name)
             if len(ax) > 0:
                 # index is all NaT
-                binner, bins, labels = _insert_nat_bin(binner, bins, labels, len(ax))
+                binner, bins, labels = _insert_nat_bin(
+                    binner, bins, labels, len(ax))
             return binner, bins, labels
 
         freq_mult = self.freq.n
@@ -2351,7 +2434,8 @@
             )
             # Get offset for bin edge (not label edge) adjustment
-            start_offset = Period(start, self.freq) - Period(p_start, self.freq)
+            start_offset = Period(start, self.freq) - \
+                Period(p_start, self.freq)
             # error: Item "Period" of "Union[Period, Any]" has no attribute "n"
             bin_shift = start_offset.n % freq_mult  # type: ignore[union-attr]
             start = p_start
@@ -2375,7 +2459,8 @@ def _get_period_bins(self, ax: PeriodIndex):
         bins = memb.searchsorted(prng, side="left")
 
         if nat_count > 0:
-            binner, bins, labels = _insert_nat_bin(binner, bins, labels, nat_count)
+            binner, bins, labels = _insert_nat_bin(
+                binner, bins, labels, nat_count)
 
         return binner, bins, labels
 
@@ -2412,7 +2497,8 @@ def _take_new_index(
         new_values = algos.take_nd(obj._values, indexer)
         return obj._constructor(new_values, index=new_index, name=obj.name)
     elif isinstance(obj, ABCDataFrame):
-        new_mgr = obj._mgr.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1)
+        new_mgr = obj._mgr.reindex_indexer(
+            new_axis=new_index, indexer=indexer, axis=1)
         return obj._constructor_from_mgr(new_mgr, axes=new_mgr.axes)
     else:
         raise ValueError("'obj' should be either a Series or a DataFrame")
@@ -2462,7 +2548,8 @@ def _get_timestamp_range_edges(
     if isinstance(freq, Tick):
         index_tz = first.tz
         if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None):
-            raise ValueError("The origin must have the same timezone as the index.")
+            raise ValueError(
+                "The origin must have the same timezone as the index.")
         if origin == "epoch":
             # set the epoch based on the timezone to have similar bins results when
             # resampling on the same kind of indexes on different timezones
@@ -2691,7 +2778,8 @@ def asfreq(
     if isinstance(obj.index, DatetimeIndex):
         # TODO: should we disallow non-DatetimeIndex?
         unit = obj.index.unit
-        dti = date_range(obj.index.min(), obj.index.max(), freq=freq, unit=unit)
+        dti = date_range(obj.index.min(), obj.index.max(),
+                         freq=freq, unit=unit)
         dti.name = obj.index.name
         new_obj = obj.reindex(dti, method=method, fill_value=fill_value)
         if normalize:
@@ -2721,9 +2809,11 @@ def _asfreq_compat(index: FreqIndexT, freq) -> FreqIndexT:
     if isinstance(index, PeriodIndex):
         new_index = index.asfreq(freq=freq)
     elif isinstance(index, DatetimeIndex):
-        new_index = DatetimeIndex([], dtype=index.dtype, freq=freq, name=index.name)
+        new_index = DatetimeIndex(
+            [], dtype=index.dtype, freq=freq, name=index.name)
     elif isinstance(index, TimedeltaIndex):
-        new_index = TimedeltaIndex([], dtype=index.dtype, freq=freq, name=index.name)
+        new_index = TimedeltaIndex(
+            [], dtype=index.dtype, freq=freq, name=index.name)
     else:  # pragma: no cover
         raise TypeError(type(index))
     return new_index
@@ -2740,5 +2830,6 @@ def _apply(
         target_category=DeprecationWarning,
         new_message=new_message,
     ):
-        result = grouped.apply(how, *args, include_groups=include_groups, **kwargs)
+        result = grouped.apply(
+            how, *args, include_groups=include_groups, **kwargs)
     return result

From c1093e54e5dcfa802148ec29b9a9e4872be8c58c Mon Sep 17 00:00:00 2001
From: Derek Knowlton
Date: Thu, 5 Dec 2024 15:56:05 -0800
Subject: [PATCH 2/4] fixed resample.Resampler docstring errors in min, max,
 mean, prod, std, var as indicated by validate_docstrings.py

---
 pandas/core/resample.py | 110 +++++++++++++++++-----------------------
 1 file changed, 47 insertions(+), 63 deletions(-)

diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index d4af3753e9c41..d22764cbb7275 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import copy
-import warnings
 from textwrap import dedent
 from typing import (
     TYPE_CHECKING,
@@ -11,10 +10,10 @@
     no_type_check,
     overload,
 )
+import warnings
 
 import numpy as np
 
-import pandas.core.algorithms as algos
 from pandas._libs import lib
 from pandas._libs.tslibs import (
     BaseOffset,
@@ -26,12 +25,17 @@
     to_offset,
 )
 from pandas._typing import NDFrameT
-from pandas.core.apply import ResamplerWindowApply
-from pandas.core.arrays import ArrowExtensionArray
-from pandas.core.base import (
-    PandasObject,
-    SelectionMixin,
+from pandas.errors import AbstractMethodError
+from pandas.util._decorators import (
+    Appender,
+    Substitution,
+    doc,
 )
+from pandas.util._exceptions import (
+    find_stack_level,
+    rewrite_warning,
+)
+
 from pandas.core.dtypes.dtypes import (
     ArrowDtype,
     PeriodDtype,
@@ -40,6 +44,14 @@
     ABCDataFrame,
     ABCSeries,
 )
+
+import pandas.core.algorithms as algos
+from pandas.core.apply import ResamplerWindowApply
+from pandas.core.arrays import ArrowExtensionArray
+from pandas.core.base import (
+    PandasObject,
+    SelectionMixin,
+)
 from pandas.core.generic import (
     NDFrame,
     _shared_docs,
@@ -68,7 +80,7 @@
     timedelta_range,
 )
 from pandas.core.reshape.concat import concat
-from pandas.errors import AbstractMethodError
+
 from pandas.tseries.frequencies import (
     is_subperiod,
     is_superperiod,
@@ -77,15 +89,6 @@
     Day,
     Tick,
 )
-from pandas.util._decorators import (
-    Appender,
-    Substitution,
-    doc,
-)
-from pandas.util._exceptions import (
-    find_stack_level,
-    rewrite_warning,
-)
 
 if TYPE_CHECKING:
     from collections.abc import (
@@ -93,10 +96,6 @@
         Hashable,
     )
 
-    from pandas import (
-        DataFrame,
-        Series,
-    )
     from pandas._typing import (
         Any,
         AnyArrayLike,
@@ -114,6 +114,11 @@
         npt,
     )
 
+    from pandas import (
+        DataFrame,
+        Series,
+    )
+
 
 _shared_docs_kwargs: dict[str, str] = {}
 
@@ -356,8 +360,7 @@ def pipe(
         axis="",
     )
     def aggregate(self, func=None, *args, **kwargs):
-        result = ResamplerWindowApply(
-            self, func, args=args, kwargs=kwargs).agg()
+        result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
         if result is None:
             how = func
             result = self._groupby_and_aggregate(how, *args, **kwargs)
@@ -444,14 +447,13 @@ def _groupby_and_aggregate(self, how, *args, **kwargs):
         # Excludes `on` column when provided
         obj = self._obj_with_exclusions
 
-        grouped = get_groupby(obj, by=None, grouper=grouper,
-                              group_keys=self.group_keys)
+        grouped = get_groupby(obj, by=None, grouper=grouper, group_keys=self.group_keys)
 
         try:
             if callable(how):
                 # TODO: test_resample_apply_with_additional_args fails if we go
                 # through the non-lambda path, not clear that it should.
-                def func(x): return how(x, *args, **kwargs)
+                func = lambda x: how(x, *args, **kwargs)
                 result = grouped.aggregate(func)
             else:
                 result = grouped.aggregate(how, *args, **kwargs)
@@ -1647,16 +1649,14 @@ def _apply(self, f, *args, **kwargs):
         """
 
         def func(x):
-            x = self._resampler_cls(
-                x, timegrouper=self._timegrouper, gpr_index=self.ax)
+            x = self._resampler_cls(x, timegrouper=self._timegrouper, gpr_index=self.ax)
 
             if isinstance(f, str):
                 return getattr(x, f)(**kwargs)
 
             return x.apply(f, *args, **kwargs)
 
-        result = _apply(self._groupby, func,
-                        include_groups=self.include_groups)
+        result = _apply(self._groupby, func, include_groups=self.include_groups)
         return self._wrap_result(result)
 
     _upsample = _apply
@@ -2074,17 +2074,14 @@ def __init__(
         if closed not in {None, "left", "right"}:
             raise ValueError(f"Unsupported value {closed} for `closed`")
         if convention not in {None, "start", "end", "e", "s"}:
-            raise ValueError(
-                f"Unsupported value {convention} for `convention`")
+            raise ValueError(f"Unsupported value {convention} for `convention`")
 
         if (
-            (key is None and obj is not None and isinstance(
-                obj.index, PeriodIndex))  # type: ignore[attr-defined]
+            (key is None and obj is not None and isinstance(obj.index, PeriodIndex))  # type: ignore[attr-defined]
             or (
                 key is not None
                 and obj is not None
-                # type: ignore[index]
-                and getattr(obj[key], "dtype", None) == "period"
+                and getattr(obj[key], "dtype", None) == "period"  # type: ignore[index]
             )
         ):
             freq = to_offset(freq, is_period=True)
@@ -2304,8 +2301,7 @@ def _adjust_bin_edges(
             edges_dti = binner.tz_localize(None)
             edges_dti = (
                 edges_dti
-                + Timedelta(days=1,
-                            unit=edges_dti.unit).as_unit(edges_dti.unit)
+                + Timedelta(days=1, unit=edges_dti.unit).as_unit(edges_dti.unit)
                 - Timedelta(1, unit=edges_dti.unit).as_unit(edges_dti.unit)
             )
             bin_edges = edges_dti.tz_localize(binner.tz).asi8
@@ -2335,8 +2331,7 @@ def _get_time_delta_bins(self, ax: TimedeltaIndex):
             )
 
         if not len(ax):
-            binner = labels = TimedeltaIndex(
-                data=[], freq=self.freq, name=ax.name)
+            binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name)
             return binner, [], labels
 
         start, end = ax.min(), ax.max()
@@ -2375,8 +2370,7 @@ def _get_time_period_bins(self, ax: DatetimeIndex):
             )
             return binner, [], labels
 
-        labels = binner = period_range(
-            start=ax[0], end=ax[-1], freq=freq, name=ax.name)
+        labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name)
 
         end_stamps = (labels + freq).asfreq(freq, "s").to_timestamp()
         if ax.tz:
@@ -2405,12 +2399,10 @@ def _get_period_bins(self, ax: PeriodIndex):
         if not len(memb):
             # index contains no valid (non-NaT) values
             bins = np.array([], dtype=np.int64)
-            binner = labels = PeriodIndex(
-                data=[], freq=self.freq, name=ax.name)
+            binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name)
             if len(ax) > 0:
                 # index is all NaT
-                binner, bins, labels = _insert_nat_bin(
-                    binner, bins, labels, len(ax))
+                binner, bins, labels = _insert_nat_bin(binner, bins, labels, len(ax))
             return binner, bins, labels
 
         freq_mult = self.freq.n
@@ -2434,8 +2426,7 @@ def _get_period_bins(self, ax: PeriodIndex):
             )
 
             # Get offset for bin edge (not label edge) adjustment
-            start_offset = Period(start, self.freq) - \
-                Period(p_start, self.freq)
+            start_offset = Period(start, self.freq) - Period(p_start, self.freq)
             # error: Item "Period" of "Union[Period, Any]" has no attribute "n"
             bin_shift = start_offset.n % freq_mult  # type: ignore[union-attr]
             start = p_start
@@ -2459,8 +2450,7 @@ def _get_period_bins(self, ax: PeriodIndex):
         bins = memb.searchsorted(prng, side="left")
 
         if nat_count > 0:
-            binner, bins, labels = _insert_nat_bin(
-                binner, bins, labels, nat_count)
+            binner, bins, labels = _insert_nat_bin(binner, bins, labels, nat_count)
 
         return binner, bins, labels
 
@@ -2497,8 +2487,7 @@ def _take_new_index(
         new_values = algos.take_nd(obj._values, indexer)
         return obj._constructor(new_values, index=new_index, name=obj.name)
     elif isinstance(obj, ABCDataFrame):
-        new_mgr = obj._mgr.reindex_indexer(
-            new_axis=new_index, indexer=indexer, axis=1)
+        new_mgr = obj._mgr.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1)
         return obj._constructor_from_mgr(new_mgr, axes=new_mgr.axes)
     else:
         raise ValueError("'obj' should be either a Series or a DataFrame")
@@ -2548,8 +2537,7 @@ def _get_timestamp_range_edges(
     if isinstance(freq, Tick):
         index_tz = first.tz
         if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None):
-            raise ValueError(
-                "The origin must have the same timezone as the index.")
+            raise ValueError("The origin must have the same timezone as the index.")
         if origin == "epoch":
             # set the epoch based on the timezone to have similar bins results when
             # resampling on the same kind of indexes on different timezones
@@ -2778,8 +2766,7 @@ def asfreq(
     if isinstance(obj.index, DatetimeIndex):
         # TODO: should we disallow non-DatetimeIndex?
         unit = obj.index.unit
-        dti = date_range(obj.index.min(), obj.index.max(),
-                         freq=freq, unit=unit)
+        dti = date_range(obj.index.min(), obj.index.max(), freq=freq, unit=unit)
         dti.name = obj.index.name
         new_obj = obj.reindex(dti, method=method, fill_value=fill_value)
         if normalize:
@@ -2809,11 +2796,9 @@ def _asfreq_compat(index: FreqIndexT, freq) -> FreqIndexT:
     if isinstance(index, PeriodIndex):
         new_index = index.asfreq(freq=freq)
     elif isinstance(index, DatetimeIndex):
-        new_index = DatetimeIndex(
-            [], dtype=index.dtype, freq=freq, name=index.name)
+        new_index = DatetimeIndex([], dtype=index.dtype, freq=freq, name=index.name)
     elif isinstance(index, TimedeltaIndex):
-        new_index = TimedeltaIndex(
-            [], dtype=index.dtype, freq=freq, name=index.name)
+        new_index = TimedeltaIndex([], dtype=index.dtype, freq=freq, name=index.name)
     else:  # pragma: no cover
         raise TypeError(type(index))
     return new_index
@@ -2830,6 +2815,5 @@ def _apply(
         target_category=DeprecationWarning,
         new_message=new_message,
     ):
-        result = grouped.apply(
-            how, *args, include_groups=include_groups, **kwargs)
-        return result
+        result = grouped.apply(how, *args, include_groups=include_groups, **kwargs)
+        return result
\ No newline at end of file

From ac16b633f328f452cb413ef6993d47e174979d65 Mon Sep 17 00:00:00 2001
From: Derek Knowlton
Date: Thu, 5 Dec 2024 16:20:20 -0800
Subject: [PATCH 3/4] fixed trailing whitespace on line 1161

---
 pandas/core/resample.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index d22764cbb7275..d5aa0a17cb7f2 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1158,7 +1158,7 @@ def min(
         --------
         core.resample.Resampler.max : Compute max value of group.
         core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
-        core.resample.Resampler.median : Compute median of groups, excluding missing 
+        core.resample.Resampler.median : Compute median of groups, excluding missing
             values.
 
         Examples

From 5a4535e9c343d20b865aa079b45c535cc069196b Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 8 Dec 2024 05:59:55 +0000
Subject: [PATCH 4/4] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 pandas/core/resample.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index d5aa0a17cb7f2..7c643bd288a72 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -2816,4 +2816,4 @@ def _apply(
         new_message=new_message,
     ):
         result = grouped.apply(how, *args, include_groups=include_groups, **kwargs)
-        return result
\ No newline at end of file
+        return result