BUG: errors and segfaults in groupby cython transforms (#16771) #26134

Merged 1 commit on Apr 20, 2019
3 changes: 2 additions & 1 deletion doc/source/whatsnew/v0.25.0.rst
@@ -387,7 +387,8 @@ Groupby/Resample/Rolling
- Ensured that ordering of outputs in ``groupby`` aggregation functions is consistent across all versions of Python (:issue:`25692`)
- Ensured that result group order is correct when grouping on an ordered ``Categorical`` and specifying ``observed=True`` (:issue:`25871`, :issue:`25167`)
- Bug in :meth:`pandas.core.window.Rolling.min` and :meth:`pandas.core.window.Rolling.max` that caused a memory leak (:issue:`25893`)
- Bug in :func:`idxmax` and :func:`idxmin` on :meth:`DataFrame.groupby` with datetime column would return incorrect dtype (:issue:`25444`, :issue:`15306`)
- Bug in :meth:`pandas.core.groupby.GroupBy.idxmax` and :meth:`pandas.core.groupby.GroupBy.idxmin` with datetime column would return incorrect dtype (:issue:`25444`, :issue:`15306`)
- Bug in :meth:`pandas.core.groupby.GroupBy.cumsum`, :meth:`pandas.core.groupby.GroupBy.cumprod`, :meth:`pandas.core.groupby.GroupBy.cummin` and :meth:`pandas.core.groupby.GroupBy.cummax` with categorical column having absent categories, would return incorrect result or segfault (:issue:`16771`)

Reshaping
^^^^^^^^^
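For context, a minimal repro of the behaviour this whatsnew entry describes (illustrative only, not taken from the PR): grouping by a Categorical with an unused category yields more groups than rows, which the cumulative transforms previously mishandled.

import pandas as pd

# Category 2 never appears in the data, so there are more groups than rows.
df = pd.DataFrame({
    "x": pd.Categorical([1], categories=[1, 2]),
    "y": [1],
})

# Before this fix the cumulative transforms could return wrong results or
# segfault; afterwards they simply return the original values per group.
print(df.y.groupby(df.x).cumsum())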
52 changes: 46 additions & 6 deletions pandas/_libs/groupby.pyx
@@ -142,19 +142,39 @@ def group_median_float64(ndarray[float64_t, ndim=2] out,
def group_cumprod_float64(float64_t[:, :] out,
const float64_t[:, :] values,
const int64_t[:] labels,
int ngroups,
bint is_datetimelike,
bint skipna=True):
"""Cumulative product of columns of `values`, in row groups `labels`.

Parameters
----------
out : float64 array
Array to store cumprod in.
values : float64 array
Values to take cumprod of.
labels : int64 array
Labels to group by.
ngroups : int
Number of groups, larger than all entries of `labels`.
is_datetimelike : bool
Always false, `values` is never datetime-like.
skipna : bool
If true, ignore nans in `values`.

Notes
-----
This method modifies the `out` parameter, rather than returning an object.
"""
Only transforms on axis=0
"""

cdef:
Py_ssize_t i, j, N, K, size
float64_t val
float64_t[:, :] accum
int64_t lab

N, K = (<object>values).shape
accum = np.ones_like(values)
accum = np.ones((ngroups, K), dtype=np.float64)

with nogil:
for i in range(N):
@@ -179,19 +199,39 @@ def group_cumprod_float64(float64_t[:, :] out,
def group_cumsum(numeric[:, :] out,
numeric[:, :] values,
const int64_t[:] labels,
int ngroups,
is_datetimelike,
bint skipna=True):
"""Cumulative sum of columns of `values`, in row groups `labels`.

Parameters
----------
out : array
Array to store cumsum in.
values : array
Values to take cumsum of.
labels : int64 array
Labels to group by.
ngroups : int
Number of groups, larger than all entries of `labels`.
is_datetimelike : bool
True if `values` contains datetime-like entries.
skipna : bool
If true, ignore nans in `values`.

Notes
-----
This method modifies the `out` parameter, rather than returning an object.
"""
Only transforms on axis=0
"""

cdef:
Py_ssize_t i, j, N, K, size
numeric val
numeric[:, :] accum
int64_t lab

N, K = (<object>values).shape
accum = np.zeros_like(values)
accum = np.zeros((ngroups, K), dtype=np.asarray(values).dtype)

with nogil:
for i in range(N):
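The essential change in both kernels above is the accumulator shape: it now has one row per group rather than one row per input row. A rough NumPy sketch of the idea (a simplification, not the Cython code itself — no NaN/skipna handling, names invented for illustration): with absent categorical categories, ngroups can exceed the number of rows, so an accumulator sized like `values` (the old np.zeros_like/np.ones_like) is too small and `accum[lab, j]` indexes out of bounds.

import numpy as np

def group_cumsum_sketch(values, labels, ngroups):
    # Simplified grouped cumulative sum, illustration only.
    N, K = values.shape
    out = np.empty_like(values)
    # One accumulator row per group; ngroups may exceed N when some groups
    # (absent categories) have no rows, which is the GH 16771 situation.
    accum = np.zeros((ngroups, K), dtype=values.dtype)
    for i in range(N):
        lab = labels[i]
        if lab < 0:                 # row belongs to no group
            out[i, :] = np.nan
            continue
        for j in range(K):
            accum[lab, j] += values[i, j]
            out[i, j] = accum[lab, j]
    return out

# 4 rows but 6 groups: the last two groups never occur in `labels`.
vals = np.array([[1.0], [2.0], [3.0], [4.0]])
labs = np.array([0, 0, 3, 3])
print(group_cumsum_sketch(vals, labs, ngroups=6).ravel())   # [1. 3. 3. 7.]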
48 changes: 42 additions & 6 deletions pandas/_libs/groupby_helper.pxi.in
@@ -474,18 +474,36 @@ def group_min(groupby_t[:, :] out,
def group_cummin(groupby_t[:, :] out,
groupby_t[:, :] values,
const int64_t[:] labels,
int ngroups,
bint is_datetimelike):
"""Cumulative minimum of columns of `values`, in row groups `labels`.

Parameters
----------
out : array
Array to store cummin in.
values : array
Values to take cummin of.
labels : int64 array
Labels to group by.
ngroups : int
Number of groups, larger than all entries of `labels`.
is_datetimelike : bool
True if `values` contains datetime-like entries.

Notes
-----
This method modifies the `out` parameter, rather than returning an object.
"""
Only transforms on axis=0
"""

cdef:
Py_ssize_t i, j, N, K, size
groupby_t val, mval
ndarray[groupby_t, ndim=2] accum
int64_t lab

N, K = (<object>values).shape
accum = np.empty_like(values)
accum = np.empty((ngroups, K), dtype=np.asarray(values).dtype)
if groupby_t is int64_t:
accum[:] = _int64_max
else:
@@ -522,18 +540,36 @@ def group_cummin(groupby_t[:, :] out,
def group_cummax(groupby_t[:, :] out,
groupby_t[:, :] values,
const int64_t[:] labels,
int ngroups,
bint is_datetimelike):
"""Cumulative maximum of columns of `values`, in row groups `labels`.

Parameters
----------
out : array
Array to store cummax in.
values : array
Values to take cummax of.
labels : int64 array
Labels to group by.
ngroups : int
Number of groups, larger than all entries of `labels`.
is_datetimelike : bool
True if `values` contains datetime-like entries.

Notes
-----
This method modifies the `out` parameter, rather than returning an object.
"""
Only transforms on axis=0
"""

cdef:
Py_ssize_t i, j, N, K, size
groupby_t val, mval
ndarray[groupby_t, ndim=2] accum
int64_t lab

N, K = (<object>values).shape
accum = np.empty_like(values)
accum = np.empty((ngroups, K), dtype=np.asarray(values).dtype)
if groupby_t is int64_t:
accum[:] = -_int64_max
else:
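The cummin/cummax kernels follow the same pattern, with the accumulator pre-filled with a sentinel (the int64 max/min, or +/-inf for floats) so the first value observed in each group always replaces it. A rough Python analogue, under the same simplifying assumptions as the sketch above:

import numpy as np

def group_cummin_sketch(values, labels, ngroups):
    # Simplified grouped cumulative minimum, illustration only.
    N, K = values.shape
    out = np.empty_like(values)
    # One row per group, initialised to the dtype's "infinity" so the first
    # value seen in a group becomes its running minimum; cummax mirrors this
    # with the dtype's minimum / -inf.
    if np.issubdtype(values.dtype, np.integer):
        sentinel = np.iinfo(values.dtype).max
    else:
        sentinel = np.inf
    accum = np.full((ngroups, K), sentinel, dtype=values.dtype)
    for i in range(N):
        lab = labels[i]
        for j in range(K):
            accum[lab, j] = min(accum[lab, j], values[i, j])
            out[i, j] = accum[lab, j]
    return out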
9 changes: 5 additions & 4 deletions pandas/core/groupby/ops.py
@@ -361,8 +361,8 @@ def get_group_levels(self):
'cummax': 'group_cummax',
'rank': {
'name': 'group_rank',
'f': lambda func, a, b, c, d, **kwargs: func(
a, b, c, d,
'f': lambda func, a, b, c, d, e, **kwargs: func(
a, b, c, e,
kwargs.get('ties_method', 'average'),
kwargs.get('ascending', True),
kwargs.get('pct', False),
@@ -600,9 +600,10 @@ def _transform(self, result, values, comp_ids, transform_func,
for i, chunk in enumerate(values.transpose(2, 0, 1)):

transform_func(result[:, :, i], values,
comp_ids, is_datetimelike, **kwargs)
comp_ids, ngroups, is_datetimelike, **kwargs)
else:
transform_func(result, values, comp_ids, is_datetimelike, **kwargs)
transform_func(result, values, comp_ids, ngroups, is_datetimelike,
**kwargs)

return result

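To make the intent of this change explicit: _transform now always threads ngroups through to the Cython kernels, and the 'rank' adapter is the one place that drops it again, since group_rank takes no ngroups argument — hence the extra positional e (the datetime-like flag) in the lambda while it still forwards only four leading arguments. A simplified, hypothetical sketch of that dispatch (names invented for illustration, kwargs handling condensed):

def dispatch(kernel, out, values, labels, ngroups, is_datetimelike, **kwargs):
    # Illustration only: cumulative kernels take ngroups between the labels
    # and the datetime flag; group_rank does not, so it is called without it.
    if kernel.__name__ == 'group_rank':
        kernel(out, values, labels, is_datetimelike, **kwargs)
    else:
        kernel(out, values, labels, ngroups, is_datetimelike, **kwargs)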
27 changes: 22 additions & 5 deletions pandas/tests/groupby/test_transform.py
@@ -9,7 +9,8 @@
from pandas.core.dtypes.common import ensure_platform_int, is_timedelta64_dtype

import pandas as pd
from pandas import DataFrame, MultiIndex, Series, Timestamp, concat, date_range
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, concat, date_range)
from pandas.core.groupby.groupby import DataError
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
@@ -470,7 +471,8 @@ def _check_cython_group_transform_cumulative(pd_op, np_op, dtype):
ans = np.zeros_like(data)

labels = np.array([0, 0, 0, 0], dtype=np.int64)
pd_op(ans, data, labels, is_datetimelike)
ngroups = 1
pd_op(ans, data, labels, ngroups, is_datetimelike)

tm.assert_numpy_array_equal(np_op(data), ans[:, 0],
check_dtype=False)
@@ -496,17 +498,19 @@ def test_cython_group_transform_algos():

# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)
ngroups = 1

data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
actual = np.zeros_like(data)
actual.fill(np.nan)
groupby.group_cumprod_float64(actual, data, labels, is_datetimelike)
groupby.group_cumprod_float64(actual, data, labels, ngroups,
is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)

actual = np.zeros_like(data)
actual.fill(np.nan)
groupby.group_cumsum(actual, data, labels, is_datetimelike)
groupby.group_cumsum(actual, data, labels, ngroups, is_datetimelike)
expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)

@@ -515,7 +519,7 @@ def test_cython_group_transform_algos():
data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
actual = np.zeros_like(data, dtype='int64')
groupby.group_cumsum(actual, data.view('int64'), labels,
is_datetimelike)
ngroups, is_datetimelike)
expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
np.timedelta64(5, 'ns')])
@@ -863,3 +867,16 @@ def test_groupby_transform_with_datetimes(func, values):
index=dates, name="price")

tm.assert_series_equal(result, expected)


@pytest.mark.parametrize('func', ['cumsum', 'cumprod', 'cummin', 'cummax'])
def test_transform_absent_categories(func):
# GH 16771
# cython transforms with more groups than rows
x_vals = [1]
x_cats = range(2)
y = [1]
df = DataFrame(dict(x=Categorical(x_vals, x_cats), y=y))
result = getattr(df.y.groupby(df.x), func)()
expected = df.y
assert_series_equal(result, expected)