Skip to content

Consolidate nth / last object Groupby Implementations #19610

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Feb 10, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
99 changes: 0 additions & 99 deletions pandas/_libs/groupby.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -25,105 +25,6 @@ cdef double NaN = <double> np.NaN
cdef double nan = NaN


# TODO: aggregate multiple columns in single pass
# ----------------------------------------------------------------------
# first, nth, last


@cython.boundscheck(False)
@cython.wraparound(False)
def group_nth_object(ndarray[object, ndim=2] out,
                     ndarray[int64_t] counts,
                     ndarray[object, ndim=2] values,
                     ndarray[int64_t] labels,
                     int64_t rank,
                     Py_ssize_t min_count=-1):
    """
    Take the `rank`-th non-null value of each column within each group.

    Only aggregates on axis=0.

    Parameters
    ----------
    out : object ndarray, shape (ngroups, K)
        Output buffer, filled in place: the rank-th non-null value per
        group/column, or NaN when the group has fewer than `rank`
        non-null observations.
    counts : int64 ndarray, shape (ngroups,)
        Incremented in place with the number of labelled rows per group.
    values : object ndarray, shape (N, K)
        Values to aggregate.
    labels : int64 ndarray, shape (N,)
        Group label for each row; negative labels mark rows to skip.
    rank : int64
        1-based position of the observation to take within each group.
    min_count : Py_ssize_t, default -1
        Not supported here; must stay -1 (only used in add and prod).
    """
    cdef:
        Py_ssize_t i, j, N, K, lab
        object val
        float64_t count
        ndarray[int64_t, ndim=2] nobs
        ndarray[object, ndim=2] resx

    assert min_count == -1, "'min_count' only used in add and prod"

    # nobs[g, j]: running count of non-null observations for group g, column j
    nobs = np.zeros((<object> out).shape, dtype=np.int64)
    # resx[g, j]: the rank-th observation once (and if) it is reached
    resx = np.empty((<object> out).shape, dtype=object)

    N, K = (<object> values).shape

    for i in range(N):
        lab = labels[i]
        if lab < 0:
            # negative label == row not assigned to any group
            continue

        counts[lab] += 1
        for j in range(K):
            val = values[i, j]

            # not nan (NaN != NaN, so this comparison filters missing values)
            if val == val:
                nobs[lab, j] += 1
                if nobs[lab, j] == rank:
                    resx[lab, j] = val

    # second pass: emit NaN for group/column cells that never saw any
    # non-null value, otherwise the captured rank-th value
    for i in range(len(counts)):
        for j in range(K):
            if nobs[i, j] == 0:
                out[i, j] = <object> nan
            else:
                out[i, j] = resx[i, j]


@cython.boundscheck(False)
@cython.wraparound(False)
def group_last_object(ndarray[object, ndim=2] out,
                      ndarray[int64_t] counts,
                      ndarray[object, ndim=2] values,
                      ndarray[int64_t] labels,
                      Py_ssize_t min_count=-1):
    """
    Take the last non-null value of each column within each group.

    Only aggregates on axis=0.

    Parameters
    ----------
    out : object ndarray, shape (ngroups, K)
        Output buffer, filled in place: the last non-null value per
        group/column, or NaN when the group has no non-null observations.
    counts : int64 ndarray, shape (ngroups,)
        Incremented in place with the number of labelled rows per group.
    values : object ndarray, shape (N, K)
        Values to aggregate.
    labels : int64 ndarray, shape (N,)
        Group label for each row; negative labels mark rows to skip.
    min_count : Py_ssize_t, default -1
        Not supported here; must stay -1 (only used in add and prod).
    """
    cdef:
        Py_ssize_t i, j, N, K, lab
        object val
        float64_t count
        ndarray[object, ndim=2] resx
        ndarray[int64_t, ndim=2] nobs

    assert min_count == -1, "'min_count' only used in add and prod"

    # nobs[g, j]: count of non-null observations for group g, column j
    nobs = np.zeros((<object> out).shape, dtype=np.int64)
    # resx[g, j]: most recently seen non-null value (overwritten each time,
    # so it ends up holding the last one)
    resx = np.empty((<object> out).shape, dtype=object)

    N, K = (<object> values).shape

    for i in range(N):
        lab = labels[i]
        if lab < 0:
            # negative label == row not assigned to any group
            continue

        counts[lab] += 1
        for j in range(K):
            val = values[i, j]

            # not nan (NaN != NaN, so this comparison filters missing values)
            if val == val:
                nobs[lab, j] += 1
                resx[lab, j] = val

    # second pass: emit NaN for group/column cells that never saw any
    # non-null value, otherwise the last captured value
    for i in range(len(counts)):
        for j in range(K):
            if nobs[i, j] == 0:
                out[i, j] = nan
            else:
                out[i, j] = resx[i, j]


cdef inline float64_t median_linear(float64_t* a, int n) nogil:
cdef int i, j, na_count = 0
cdef float64_t result
Expand Down
32 changes: 20 additions & 12 deletions pandas/_libs/groupby_helper.pxi.in
Original file line number Diff line number Diff line change
Expand Up @@ -325,7 +325,8 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
# name, c_type, dest_type2, nan_val
dtypes = [('float64', 'float64_t', 'float64_t', 'NAN'),
('float32', 'float32_t', 'float32_t', 'NAN'),
('int64', 'int64_t', 'int64_t', 'iNaT')]
('int64', 'int64_t', 'int64_t', 'iNaT'),
('object', 'object', 'object', 'NAN')]

def get_dispatch(dtypes):

Expand All @@ -350,7 +351,7 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
{{dest_type2}} val, count
{{dest_type2}} val
ndarray[{{dest_type2}}, ndim=2] resx
ndarray[int64_t, ndim=2] nobs

Expand All @@ -360,11 +361,19 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
raise AssertionError("len(index) != len(labels)")

nobs = np.zeros((<object> out).shape, dtype=np.int64)
{{if name=='object'}}
resx = np.empty((<object> out).shape, dtype=object)
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Was really hoping to not even have this conditional, but when trying resx = np.empty_like(out) and even resx = np.empty_like(out, dtype='object') it kept SegFaulting on objects

{{else}}
resx = np.empty_like(out)
{{endif}}

N, K = (<object> values).shape

{{if name == "object"}}
if True: # make templating happy
{{else}}
with nogil:
{{endif}}
for i in range(N):
lab = labels[i]
if lab < 0:
Expand All @@ -375,11 +384,7 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
val = values[i, j]

# not nan
{{if name == 'int64'}}
if val != {{nan_val}}:
{{else}}
if val == val and val != {{nan_val}}:
{{endif}}
nobs[lab, j] += 1
resx[lab, j] = val

Expand All @@ -390,7 +395,6 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
else:
out[i, j] = resx[i, j]


@cython.wraparound(False)
@cython.boundscheck(False)
def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
Expand All @@ -403,7 +407,7 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
{{dest_type2}} val, count
{{dest_type2}} val
ndarray[{{dest_type2}}, ndim=2] resx
ndarray[int64_t, ndim=2] nobs

Expand All @@ -413,11 +417,19 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
raise AssertionError("len(index) != len(labels)")

nobs = np.zeros((<object> out).shape, dtype=np.int64)
{{if name=='object'}}
resx = np.empty((<object> out).shape, dtype=object)
{{else}}
resx = np.empty_like(out)
{{endif}}

N, K = (<object> values).shape

{{if name == "object"}}
if True: # make templating happy
{{else}}
with nogil:
{{endif}}
for i in range(N):
lab = labels[i]
if lab < 0:
Expand All @@ -428,11 +440,7 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
val = values[i, j]

# not nan
{{if name == 'int64'}}
if val != {{nan_val}}:
{{else}}
if val == val and val != {{nan_val}}:
{{endif}}
nobs[lab, j] += 1
if nobs[lab, j] == rank:
resx[lab, j] = val
Expand Down
56 changes: 27 additions & 29 deletions pandas/tests/groupby/test_groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -2086,47 +2086,45 @@ def test_median_empty_bins(self):
expected = df.groupby(bins).agg(lambda x: x.median())
assert_frame_equal(result, expected)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

can you add tests with the numeric ops on groupby / object and assert that they raise

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Assuming you want this in a different change no problem. That said, do you consider the numeric ops to be add, prod, min, max, mean, median, var, ohlc, cumprod, cumsum, cummin, cummax and rank? The tests are one thing, but these don't all raise at the moment so would have to couple that with some refactoring of the groupby module

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yes let's do this in another change.


def test_groupby_non_arithmetic_agg_types(self):
@pytest.mark.parametrize("dtype", [
'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
@pytest.mark.parametrize("method,data", [
('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]}),
('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'})
])
def test_groupby_non_arithmetic_agg_types(self, dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}])

dtypes = ['int8', 'int16', 'int32', 'int64', 'float32', 'float64']
df['b'] = df.b.astype(dtype)

grp_exp = {'first': {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]},
'last': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]},
'min': {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]},
'max': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]},
'nth': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]},
'count': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'}}
if 'args' not in data:
data['args'] = []

for dtype in dtypes:
df_in = df.copy()
df_in['b'] = df_in.b.astype(dtype)

for method, data in compat.iteritems(grp_exp):
if 'args' not in data:
data['args'] = []

if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype

exp = data['df']
df_out = pd.DataFrame(exp)
exp = data['df']
df_out = pd.DataFrame(exp)

df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)

grpd = df_in.groupby('a')
t = getattr(grpd, method)(*data['args'])
assert_frame_equal(t, df_out)
grpd = df.groupby('a')
t = getattr(grpd, method)(*data['args'])
assert_frame_equal(t, df_out)

def test_groupby_non_arithmetic_agg_intlike_precision(self):
# GH9311, GH6620
Expand Down