diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 4b7b075ceafaf..93759feef4b3e 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -324,6 +324,7 @@ Bug fixes - Fixed bug in :meth:`Series.diff` allowing non-integer values for the ``periods`` argument. (:issue:`56607`) - Fixed bug in :meth:`Series.rank` that doesn't preserve missing values for nullable integers when ``na_option='keep'``. (:issue:`56976`) - Fixed bug in :meth:`Series.replace` and :meth:`DataFrame.replace` inconsistently replacing matching instances when ``regex=True`` and missing values are present. (:issue:`56599`) +- Fixed bug in :meth:`Series.rolling.kurt` where small arrays with low variance were zeroed out even when numerically stable (:issue:`57972`) Categorical ^^^^^^^^^^^ diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index 6365c030b695b..27d2aae48579f 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -712,7 +712,8 @@ cdef float64_t calc_kurt(int64_t minp, int64_t nobs, # if the variance is less than 1e-14, it could be # treat as zero, here we follow the original # skew/kurt behaviour to check B <= 1e-14 - if B <= 1e-14: + # #57972: for small arrays the cutoff can be lowered + if (B <= 1e-14 and nobs > 100) or B <= 1e-16: result = NaN else: K = (dnobs * dnobs - 1.) * D / (B * B) - 3 * ((dnobs - 1.) 
** 2) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index b68337d9e0de9..4a16b09439ee9 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1357,9 +1357,13 @@ def nankurt( # floating point error # # #18044 in _libs/windows.pyx calc_kurt follow this behavior - # to fix the fperr to treat denom <1e-14 as zero - numerator = _zero_out_fperr(numerator) - denominator = _zero_out_fperr(denominator) + # to fix the fperr to treat denom <1e-14 as zero (default cutoff) + # GH-57972: lower the cutoff for small arrays to avoid zeroing out otherwise + # numerically stable values + length = count[0] if isinstance(count, np.ndarray) else count + cutoff = 1e-14 if length > 100 else 1e-16 + numerator = _zero_out_fperr(numerator, cutoff) + denominator = _zero_out_fperr(denominator, cutoff) if not isinstance(denominator, np.ndarray): # if ``denom`` is a scalar, check these corner cases first before @@ -1576,12 +1580,12 @@ def check_below_min_count( return False -def _zero_out_fperr(arg): +def _zero_out_fperr(arg, cutoff=1e-14): # #18044 reference this behavior to fix rolling skew/kurt issue if isinstance(arg, np.ndarray): - return np.where(np.abs(arg) < 1e-14, 0, arg) + return np.where(np.abs(arg) < cutoff, 0, arg) else: - return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg + return arg.dtype.type(0) if np.abs(arg) < cutoff else arg @disallow("M8", "m8") diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index ce41f1e76de79..537de5832ed23 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -1105,6 +1105,18 @@ def test_nans_skipna(self, samples, actual_kurt): kurt = nanops.nankurt(samples, skipna=True) tm.assert_almost_equal(kurt, actual_kurt) + def test_small_arrays_with_low_variance(self): + # GH-57972 + # small sample arrays with low variance have a lower threshold for breakdown + # of numerical stability and should be handled accordingly + low_var_samples = np.array( + [-2.05191341e-05] + [0.0e00] * 4 + 
[-4.10391103e-05] + [0.0e00] * 23 + ) + # calculated with scipy.stats.kurtosis(low_var_samples, bias=False) + scipy_kurt = 18.087646853025614 + kurt = nanops.nankurt(low_var_samples) + tm.assert_almost_equal(kurt, scipy_kurt) + @property def prng(self): return np.random.default_rng(2)