From 29bc817bf43ad1509d8d551f133f3194234cb56f Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Fri, 13 Mar 2020 09:58:29 -0700
Subject: [PATCH 1/3] loosen scalar check in Series.__getitem__

---
 pandas/core/series.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/pandas/core/series.py b/pandas/core/series.py
index 2d8eb9b29498a..f55d162f6ba5a 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -840,11 +840,15 @@ def _slice(self, slobj: slice, axis: int = 0) -> "Series":
 
     def __getitem__(self, key):
         key = com.apply_if_callable(key, self)
+        key = lib.item_from_zerodim(key)
 
         if key is Ellipsis:
             return self
 
-        key_is_scalar = is_scalar(key)
+        # check for is_list_like/slice instead of is_scalar to allow non-standard
+        # scalars through, e.g. cftime.datetime needed by xarray
+        # https://github.com/pydata/xarray/issues/3751
+        key_is_scalar = not is_list_like(key) and not isinstance(key, slice)
         if isinstance(key, (list, tuple)):
             key = unpack_1tuple(key)
 
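A minimal sketch of what the loosened check in patch 1 does, using only the public
pandas.api.types helpers; DuckScalar is a hypothetical stand-in for a third-party
scalar such as cftime.datetime from the xarray issue:

    from pandas.api.types import is_list_like, is_scalar

    class DuckScalar:
        """Hypothetical third-party scalar, standing in for cftime.datetime."""

    key = DuckScalar()

    # is_scalar recognizes only a fixed set of types, so a foreign scalar fails it.
    print(is_scalar(key))  # False
    # The loosened check treats anything that is neither list-like nor a slice as a scalar key.
    print(not is_list_like(key) and not isinstance(key, slice))  # True
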
From 14e47abf68bca90e9267c4bf3e2b7216942eb67a Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Mon, 16 Mar 2020 11:43:03 -0700
Subject: [PATCH 2/3] BUG: resample.agg with readonly data

---
 doc/source/whatsnew/v1.0.3.rst             |  1 +
 pandas/_libs/groupby.pyx                   | 12 +++++++----
 pandas/tests/resample/test_resample_api.py | 24 ++++++++++++++++++++++
 3 files changed, 33 insertions(+), 4 deletions(-)

diff --git a/doc/source/whatsnew/v1.0.3.rst b/doc/source/whatsnew/v1.0.3.rst
index 17f1bdc365518..482222fbddbb8 100644
--- a/doc/source/whatsnew/v1.0.3.rst
+++ b/doc/source/whatsnew/v1.0.3.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
 
 Fixed regressions
 ~~~~~~~~~~~~~~~~~
 
+- Fixed regression in ``resample.agg`` when the underlying data is non-writeable (:issue:`31710`)
 
 .. _whatsnew_103.bug_fixes:
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 27b3095d8cb4f..35a6963165194 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -848,11 +848,13 @@ cdef inline bint _treat_as_na(rank_t val, bint is_datetimelike) nogil:
         return val != val
 
 
+# GH#31710 use memorviews once cython 0.30 is released so we can
+# use `const rank_t[:, :] values`
 @cython.wraparound(False)
 @cython.boundscheck(False)
 def group_last(rank_t[:, :] out,
                int64_t[:] counts,
-               rank_t[:, :] values,
+               ndarray[rank_t, ndim=2] values,
                const int64_t[:] labels,
                Py_ssize_t min_count=-1):
     """
@@ -937,11 +939,13 @@ def group_last(rank_t[:, :] out,
         raise RuntimeError("empty group with uint64_t")
 
 
+# GH#31710 use memorviews once cython 0.30 is released so we can
+# use `const rank_t[:, :] values`
 @cython.wraparound(False)
 @cython.boundscheck(False)
 def group_nth(rank_t[:, :] out,
               int64_t[:] counts,
-              rank_t[:, :] values,
+              ndarray[rank_t, ndim=2] values,
              const int64_t[:] labels, int64_t rank=1,
               Py_ssize_t min_count=-1):
     """
@@ -1235,7 +1239,7 @@ ctypedef fused groupby_t:
 @cython.boundscheck(False)
 def group_max(groupby_t[:, :] out,
               int64_t[:] counts,
-              groupby_t[:, :] values,
+              ndarray[groupby_t, ndim=2] values,
               const int64_t[:] labels,
               Py_ssize_t min_count=-1):
     """
@@ -1308,7 +1312,7 @@ def group_max(groupby_t[:, :] out,
 @cython.boundscheck(False)
 def group_min(groupby_t[:, :] out,
               int64_t[:] counts,
-              groupby_t[:, :] values,
+              ndarray[groupby_t, ndim=2] values,
               const int64_t[:] labels,
               Py_ssize_t min_count=-1):
     """
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index d552241f9126f..6389c88c99f73 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -580,3 +580,27 @@ def test_agg_with_datetime_index_list_agg_func(col_name):
         columns=pd.MultiIndex(levels=[[col_name], ["mean"]], codes=[[0], [0]]),
     )
     tm.assert_frame_equal(result, expected)
+
+
+def test_resample_agg_readonly():
+    # GH#31710 cython needs to allow readonly data
+    index = pd.date_range("2020-01-01", "2020-01-02", freq="1h")
+    arr = np.zeros_like(index)
+    arr.setflags(write=False)
+
+    ser = pd.Series(arr, index=index)
+    rs = ser.resample("1D")
+
+    expected = pd.Series([pd.Timestamp(0), pd.Timestamp(0)], index=index[::24])
+
+    result = rs.agg("last")
+    tm.assert_series_equal(result, expected)
+
+    result = rs.agg("first")
+    tm.assert_series_equal(result, expected)
+
+    result = rs.agg("max")
+    tm.assert_series_equal(result, expected)
+
+    result = rs.agg("min")
+    tm.assert_series_equal(result, expected)
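Why the signature change in patch 2 helps, as a hedged sketch: a non-const typed
memoryview argument asks NumPy for a writable buffer, and a read-only array refuses
that request, which is the failure behind GH#31710. The snippet below only
demonstrates the read-only flag at the NumPy level; the quoted error text is what
Cython emits when coercion to a writable memoryview fails.

    import numpy as np

    values = np.zeros((24, 1), dtype="int64")
    values.setflags(write=False)        # simulate the read-only block hit by resample.agg

    # The buffer protocol reports the data as read-only.
    print(memoryview(values).readonly)  # True

    # With `rank_t[:, :] values` Cython requested a writable buffer and raised
    # "ValueError: buffer source array is read-only"; typing the argument as
    # ndarray[...] accepts read-only input, and `const rank_t[:, :]` would too
    # once the fused-type limitation noted in the comment above is lifted.
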
From 91d55c61ad5a4651364fdaa0dfc78a7cef348e43 Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Mon, 16 Mar 2020 15:44:38 -0700
Subject: [PATCH 3/3] revert

---
 pandas/core/series.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/pandas/core/series.py b/pandas/core/series.py
index de0ebac165f23..501555aee21b7 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -840,15 +840,11 @@ def _slice(self, slobj: slice, axis: int = 0) -> "Series":
 
     def __getitem__(self, key):
         key = com.apply_if_callable(key, self)
-        key = lib.item_from_zerodim(key)
 
         if key is Ellipsis:
             return self
 
-        # check for is_list_like/slice instead of is_scalar to allow non-standard
-        # scalars through, e.g. cftime.datetime needed by xarray
-        # https://github.com/pydata/xarray/issues/3751
-        key_is_scalar = not is_list_like(key) and not isinstance(key, slice)
+        key_is_scalar = is_scalar(key)
         if isinstance(key, (list, tuple)):
             key = unpack_1tuple(key)
 
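For reference, a short end-to-end usage sketch of the state after this series
(the Series.__getitem__ change is reverted, so only the groupby.pyx fix remains);
it mirrors the new test and assumes pandas 1.0.x as in the patches:

    import numpy as np
    import pandas as pd

    # GH#31710: resample.agg on read-only data no longer raises after the fix.
    index = pd.date_range("2020-01-01", "2020-01-02", freq="1h")
    arr = np.zeros_like(index)          # datetime64[ns] zeros, as in the new test
    arr.setflags(write=False)

    ser = pd.Series(arr, index=index)
    print(ser.resample("1D").agg("last"))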