
Commit 1b7f070

Merge pull request #3145 from jreback/transform
PERF: GH2121 groupby transform
2 parents 08672e3 + 2d81b64 · commit 1b7f070

3 files changed: +84 -16 lines

RELEASE.rst (+5, -3)
@@ -100,9 +100,6 @@ pandas 0.11.0
     the collections.Mapping ABC.
   - Allow selection semantics via a string with a datelike index to work in both
     Series and DataFrames (GH3070_)
-  - Improved performance across several core functions by taking memory
-    ordering of arrays into account. Courtesy of @stephenwlin (GH3130_)
-
 
     .. ipython:: python
 
@@ -116,6 +113,10 @@ pandas 0.11.0
     for plots. Based on https://gist.github.com/huyng/816622 (GH3075_).
 
 
+  - Improved performance across several core functions by taking memory
+    ordering of arrays into account. Courtesy of @stephenwlin (GH3130_)
+  - Improved performance of groupby transform method (GH2121_)
+
 **API Changes**
 
   - Do not automatically upcast numeric specified dtypes to ``int64`` or
@@ -234,6 +235,7 @@ pandas 0.11.0
 .. _GH622: https://github.com/pydata/pandas/issues/622
 .. _GH797: https://github.com/pydata/pandas/issues/797
 .. _GH2758: https://github.com/pydata/pandas/issues/2758
+.. _GH2121: https://github.com/pydata/pandas/issues/2121
 .. _GH2809: https://github.com/pydata/pandas/issues/2809
 .. _GH2810: https://github.com/pydata/pandas/issues/2810
 .. _GH2837: https://github.com/pydata/pandas/issues/2837
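
For context on the GH2121 release note above: the call that this merge speeds up is a whole-frame transform on a DataFrameGroupBy. A minimal sketch of that usage (the key and column names are made up for illustration and are not part of this commit):

import numpy as np
import pandas as pd

# toy frame with a grouping key and a value column containing gaps
df = pd.DataFrame({'key': ['a', 'b', 'a', 'b', 'a'],
                   'val': [1.0, np.nan, 3.0, 4.0, np.nan]})

# per-group transform; with this change the callable is first tried directly
# on each group (fast path) instead of always being routed through group.apply
padded = df.groupby('key').transform(lambda x: x.fillna(method='pad'))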

pandas/core/groupby.py (+41, -13)
@@ -13,7 +13,7 @@
 from pandas.util.compat import OrderedDict
 import pandas.core.algorithms as algos
 import pandas.core.common as com
-from pandas.core.common import _possibly_downcast_to_dtype
+from pandas.core.common import _possibly_downcast_to_dtype, notnull
 
 import pandas.lib as lib
 import pandas.algos as _algos
@@ -75,7 +75,7 @@ def f(self):
 def _first_compat(x, axis=0):
     def _first(x):
         x = np.asarray(x)
-        x = x[com.notnull(x)]
+        x = x[notnull(x)]
         if len(x) == 0:
             return np.nan
         return x[0]
@@ -89,7 +89,7 @@ def _first(x):
 def _last_compat(x, axis=0):
     def _last(x):
         x = np.asarray(x)
-        x = x[com.notnull(x)]
+        x = x[notnull(x)]
         if len(x) == 0:
             return np.nan
         return x[-1]
@@ -421,7 +421,7 @@ def ohlc(self):
 
     def nth(self, n):
         def picker(arr):
-            arr = arr[com.notnull(arr)]
+            arr = arr[notnull(arr)]
             if len(arr) >= n + 1:
                 return arr.iget(n)
             else:
@@ -1897,19 +1897,46 @@ def transform(self, func, *args, **kwargs):
         gen = self.grouper.get_iterator(obj, axis=self.axis)
 
         if isinstance(func, basestring):
-            wrapper = lambda x: getattr(x, func)(*args, **kwargs)
+            fast_path = lambda group: getattr(group, func)(*args, **kwargs)
+            slow_path = lambda group: group.apply(lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
         else:
-            wrapper = lambda x: func(x, *args, **kwargs)
+            fast_path = lambda group: func(group, *args, **kwargs)
+            slow_path = lambda group: group.apply(lambda x: func(x, *args, **kwargs), axis=self.axis)
 
+        path = None
         for name, group in gen:
             object.__setattr__(group, 'name', name)
 
-            try:
-                res = group.apply(wrapper, axis=self.axis)
-            except TypeError:
-                return self._transform_item_by_item(obj, wrapper)
-            except Exception: # pragma: no cover
-                res = wrapper(group)
+            # decide on a fast path
+            if path is None:
+
+                path = slow_path
+                try:
+                    res = slow_path(group)
+
+                    # if we make it here, test if we can use the fast path
+                    try:
+                        res_fast = fast_path(group)
+
+                        # compare that we get the same results
+                        if res.shape == res_fast.shape:
+                            res_r = res.values.ravel()
+                            res_fast_r = res_fast.values.ravel()
+                            mask = notnull(res_r)
+                            if (res_r[mask] == res_fast_r[mask]).all():
+                                path = fast_path
+
+                    except:
+                        pass
+                except TypeError:
+                    return self._transform_item_by_item(obj, fast_path)
+                except Exception: # pragma: no cover
+                    res = fast_path(group)
+                    path = fast_path
+
+            else:
+
+                res = path(group)
 
             # broadcasting
             if isinstance(res, Series):
@@ -1925,7 +1952,8 @@ def transform(self, func, *args, **kwargs):
         concat_index = obj.columns if self.axis == 0 else obj.index
         concatenated = concat(applied, join_axes=[concat_index],
                               axis=self.axis, verify_integrity=False)
-        return concatenated.reindex_like(obj)
+        concatenated.sort_index(inplace=True)
+        return concatenated
 
     def _transform_item_by_item(self, obj, wrapper):
         # iterate through columns
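
The heart of the transform change above is how the execution path is chosen: the apply-based slow path runs once on the first group, the direct fast path is tried on the same group, and the fast path is adopted for the remaining groups only if both results have the same shape and agree on every non-null value. A standalone sketch of that selection step, outside pandas internals (the choose_path function and its arguments are illustrative, not pandas API):

import pandas as pd

def choose_path(group, fast_path, slow_path):
    # Run the known-correct slow path first.
    res = slow_path(group)
    try:
        res_fast = fast_path(group)
        # Accept the fast path only if shapes match and every non-null
        # value agrees; NaN != NaN, so missing values are masked out.
        if res.shape == res_fast.shape:
            res_r = res.values.ravel()
            res_fast_r = res_fast.values.ravel()
            mask = pd.notnull(res_r)
            if (res_r[mask] == res_fast_r[mask]).all():
                return fast_path
    except Exception:
        pass
    return slow_path

# For a string func such as 'fillna', the two candidates would look like:
#   fast = lambda g: g.fillna(method='pad')
#   slow = lambda g: g.apply(lambda x: x.fillna(method='pad'))

Masking with notnull avoids NaN-to-NaN comparisons, so a fast result that differs from the slow one only at missing positions is still accepted.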

vb_suite/groupby.py (+38)
@@ -273,3 +273,41 @@ def f(g):
 """
 
 groupby_sum_booleans = Benchmark("df.groupby('ii').sum()", setup)
+
+#----------------------------------------------------------------------
+# Transform testing
+
+setup = common_setup + """
+n_dates = 1000
+n_securities = 500
+n_columns = 3
+share_na = 0.1
+
+dates = date_range('1997-12-31', periods=n_dates, freq='B')
+dates = Index(map(lambda x: x.year * 10000 + x.month * 100 + x.day, dates))
+
+secid_min = int('10000000', 16)
+secid_max = int('F0000000', 16)
+step = (secid_max - secid_min) // (n_securities - 1)
+security_ids = map(lambda x: hex(x)[2:10].upper(), range(secid_min, secid_max + 1, step))
+
+data_index = MultiIndex(levels=[dates.values, security_ids],
+                        labels=[[i for i in xrange(n_dates) for _ in xrange(n_securities)], range(n_securities) * n_dates],
+                        names=['date', 'security_id'])
+n_data = len(data_index)
+
+columns = Index(['factor{}'.format(i) for i in xrange(1, n_columns + 1)])
+
+data = DataFrame(np.random.randn(n_data, n_columns), index=data_index, columns=columns)
+
+step = int(n_data * share_na)
+for column_index in xrange(n_columns):
+    index = column_index
+    while index < n_data:
+        data.set_value(data_index[index], columns[column_index], np.nan)
+        index += step
+
+f_fillna = lambda x: x.fillna(method='pad')
+"""
+
+groupby_transform = Benchmark("data.groupby(level='security_id').transform(f_fillna)", setup)
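
The statement timed by groupby_transform above is a forward fill within each security_id group. A tiny self-contained version of the same operation on a made-up frame (the index values and the factor1 column are illustrative only):

import numpy as np
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [(19971231, 'AAA'), (19971231, 'BBB'),
     (19980101, 'AAA'), (19980101, 'BBB'),
     (19980102, 'AAA'), (19980102, 'BBB')],
    names=['date', 'security_id'])
small = pd.DataFrame({'factor1': [1.0, np.nan, np.nan, 2.0, 3.0, np.nan]},
                     index=idx)

# pad missing values forward within each security, as f_fillna does above
filled = small.groupby(level='security_id').transform(lambda x: x.fillna(method='pad'))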
